xref: /linux/arch/s390/kvm/vsie.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * kvm nested virtualization support for s390x
4  *
5  * Copyright IBM Corp. 2016, 2018
6  *
7  *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
8  */
9 #include <linux/vmalloc.h>
10 #include <linux/kvm_host.h>
11 #include <linux/bug.h>
12 #include <linux/list.h>
13 #include <linux/bitmap.h>
14 #include <linux/sched/signal.h>
15 #include <linux/io.h>
16 #include <linux/mman.h>
17 
18 #include <asm/mmu_context.h>
19 #include <asm/sclp.h>
20 #include <asm/nmi.h>
21 #include <asm/dis.h>
22 #include <asm/facility.h>
23 #include "kvm-s390.h"
24 #include "gaccess.h"
25 #include "gmap.h"
26 
27 enum vsie_page_flags {
28 	VSIE_PAGE_IN_USE = 0,
29 };
30 
31 struct vsie_page {
32 	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
33 	/*
34 	 * the backup info for machine check. ensure it's at
35 	 * the same offset as that in struct sie_page!
36 	 */
37 	struct mcck_volatile_info mcck_info;    /* 0x0200 */
38 	/*
39 	 * The pinned original scb. Be aware that other VCPUs can modify
40 	 * it while we read from it. Values that are used for conditions or
41 	 * are reused conditionally, should be accessed via READ_ONCE.
42 	 */
43 	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
44 	/*
45 	 * Flags: must be set/cleared atomically after the vsie page can be
46 	 * looked up by other CPUs.
47 	 */
48 	unsigned long flags;			/* 0x0220 */
49 	/* address of the last reported fault to guest2 */
50 	unsigned long fault_addr;		/* 0x0228 */
51 	/* calculated guest addresses of satellite control blocks */
52 	gpa_t sca_gpa;				/* 0x0230 */
53 	gpa_t itdba_gpa;			/* 0x0238 */
54 	gpa_t gvrd_gpa;				/* 0x0240 */
55 	gpa_t riccbd_gpa;			/* 0x0248 */
56 	gpa_t sdnx_gpa;				/* 0x0250 */
57 	/*
58 	 * guest address of the original SCB. Remains set for free vsie
59 	 * pages, so we can properly look them up in our addr_to_page
60 	 * radix tree.
61 	 */
62 	gpa_t scb_gpa;				/* 0x0258 */
63 	/* the shadow gmap in use by the vsie_page */
64 	struct gmap_cache gmap_cache;		/* 0x0260 */
65 	__u8 reserved[0x0700 - 0x0278];		/* 0x0278 */
66 	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
67 	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
68 };
69 
70 static_assert(sizeof(struct vsie_page) == PAGE_SIZE);
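/*
 * Editor's note (illustrative, not part of the original source): the
 * offsets in the struct comments above could be cross-checked at build
 * time in the same style as the size check above, e.g.
 *
 *	static_assert(offsetof(struct vsie_page, mcck_info) == 0x200);
 *	static_assert(offsetof(struct vsie_page, crycb) == 0x700);
 *
 * mcck_info in particular must stay at the same offset as in struct
 * sie_page, since the machine check handling relies on that layout.
 */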
71 
72 /* trigger a validity icpt for the given scb */
73 static int set_validity_icpt(struct kvm_s390_sie_block *scb,
74 			     __u16 reason_code)
75 {
76 	scb->ipa = 0x1000;
77 	scb->ipb = ((__u32) reason_code) << 16;
78 	scb->icptcode = ICPT_VALIDITY;
79 	return 1;
80 }
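/*
 * Illustrative sketch (editor's addition, not in the original source):
 * a validity intercept for reason code 0x0037 leaves the shadow scb with
 *
 *	scb->ipa      == 0x1000
 *	scb->ipb      == 0x00370000	(reason code shifted into ipb)
 *	scb->icptcode == ICPT_VALIDITY
 *
 * and the return value 1 tells the caller that control has to be given
 * back to guest 2.
 */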
81 
82 /* mark the prefix as unmapped, this will block the VSIE */
83 static void prefix_unmapped(struct vsie_page *vsie_page)
84 {
85 	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
86 }
87 
88 /* mark the prefix as unmapped and wait until the VSIE has been left */
89 static void prefix_unmapped_sync(struct vsie_page *vsie_page)
90 {
91 	prefix_unmapped(vsie_page);
92 	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
93 		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
94 	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
95 		cpu_relax();
96 }
97 
98 /* mark the prefix as mapped, this will allow the VSIE to run */
99 static void prefix_mapped(struct vsie_page *vsie_page)
100 {
101 	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
102 }
103 
104 /* test if the prefix is mapped into the gmap shadow */
105 static int prefix_is_mapped(struct vsie_page *vsie_page)
106 {
107 	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
108 }
109 
110 /* copy the updated intervention request bits into the shadow scb */
111 static void update_intervention_requests(struct vsie_page *vsie_page)
112 {
113 	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
114 	int cpuflags;
115 
116 	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
117 	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
118 	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
119 }
120 
121 /* shadow (filter and validate) the cpuflags  */
122 static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
123 {
124 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
125 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
126 	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
127 
128 	/* we don't allow ESA/390 guests unless explicitly enabled */
129 	if (!(cpuflags & CPUSTAT_ZARCH) && !vcpu->kvm->arch.allow_vsie_esamode)
130 		return set_validity_icpt(scb_s, 0x0001U);
131 
132 	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
133 		return set_validity_icpt(scb_s, 0x0001U);
134 	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
135 		return set_validity_icpt(scb_s, 0x0007U);
136 
137 	/* intervention requests will be set later */
138 	newflags = 0;
139 	if (cpuflags & CPUSTAT_ZARCH)
140 		newflags = CPUSTAT_ZARCH;
141 	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
142 		newflags |= CPUSTAT_GED;
143 	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
144 		if (cpuflags & CPUSTAT_GED)
145 			return set_validity_icpt(scb_s, 0x0001U);
146 		newflags |= CPUSTAT_GED2;
147 	}
148 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
149 		newflags |= cpuflags & CPUSTAT_P;
150 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
151 		newflags |= cpuflags & CPUSTAT_SM;
152 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
153 		newflags |= cpuflags & CPUSTAT_IBS;
154 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
155 		newflags |= cpuflags & CPUSTAT_KSS;
156 
157 	atomic_set(&scb_s->cpuflags, newflags);
158 	return 0;
159 }
160 /* Copy to APCB FORMAT1 from APCB FORMAT0 */
161 static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
162 			unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
163 {
164 	struct kvm_s390_apcb0 tmp;
165 	unsigned long apcb_gpa;
166 
167 	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
168 
169 	if (read_guest_real(vcpu, apcb_gpa, &tmp,
170 			    sizeof(struct kvm_s390_apcb0)))
171 		return -EFAULT;
172 
173 	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
174 	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
175 	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
176 
177 	return 0;
178 
179 }
180 
181 /**
182  * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
183  * @vcpu: pointer to the virtual CPU
184  * @apcb_s: pointer to start of apcb in the shadow crycb
185  * @crycb_gpa: guest physical address to start of original guest crycb
186  * @apcb_h: pointer to start of apcb in the guest1
187  *
188  * Returns 0 on success, -EFAULT on error reading guest apcb
189  */
190 static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
191 			unsigned long crycb_gpa, unsigned long *apcb_h)
192 {
193 	unsigned long apcb_gpa;
194 
195 	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
196 
197 	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
198 			    sizeof(struct kvm_s390_apcb0)))
199 		return -EFAULT;
200 
201 	bitmap_and(apcb_s, apcb_s, apcb_h,
202 		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
203 
204 	return 0;
205 }
206 
207 /**
208  * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
209  * @vcpu: pointer to the virtual CPU
210  * @apcb_s: pointer to start of apcb in the shadow crycb
211  * @crycb_gpa: guest physical address to start of original guest crycb
212  * @apcb_h: pointer to start of apcb in the host
213  *
214  * Returns 0 on success, -EFAULT on error reading guest apcb
215  */
216 static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
217 			unsigned long crycb_gpa,
218 			unsigned long *apcb_h)
219 {
220 	unsigned long apcb_gpa;
221 
222 	apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
223 
224 	if (read_guest_real(vcpu, apcb_gpa, apcb_s,
225 			    sizeof(struct kvm_s390_apcb1)))
226 		return -EFAULT;
227 
228 	bitmap_and(apcb_s, apcb_s, apcb_h,
229 		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
230 
231 	return 0;
232 }
233 
234 /**
235  * setup_apcb - Create a shadow copy of the apcb.
236  * @vcpu: pointer to the virtual CPU
237  * @crycb_s: pointer to shadow crycb
238  * @crycb_gpa: guest physical address of original guest crycb
239  * @crycb_h: pointer to the host crycb
240  * @fmt_o: format of the original guest crycb.
241  * @fmt_h: format of the host crycb.
242  *
243  * Checks the compatibility between the guest and host crycb and calls the
244  * appropriate copy function.
245  *
246  * Returns 0 on success, or an error number if the guest and host crycb are incompatible.
247  */
248 static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
249 	       const u32 crycb_gpa,
250 	       struct kvm_s390_crypto_cb *crycb_h,
251 	       int fmt_o, int fmt_h)
252 {
253 	switch (fmt_o) {
254 	case CRYCB_FORMAT2:
255 		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
256 			return -EACCES;
257 		if (fmt_h != CRYCB_FORMAT2)
258 			return -EINVAL;
259 		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
260 				    crycb_gpa,
261 				    (unsigned long *)&crycb_h->apcb1);
262 	case CRYCB_FORMAT1:
263 		switch (fmt_h) {
264 		case CRYCB_FORMAT2:
265 			return setup_apcb10(vcpu, &crycb_s->apcb1,
266 					    crycb_gpa,
267 					    &crycb_h->apcb1);
268 		case CRYCB_FORMAT1:
269 			return setup_apcb00(vcpu,
270 					    (unsigned long *) &crycb_s->apcb0,
271 					    crycb_gpa,
272 					    (unsigned long *) &crycb_h->apcb0);
273 		}
274 		break;
275 	case CRYCB_FORMAT0:
276 		if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
277 			return -EACCES;
278 
279 		switch (fmt_h) {
280 		case CRYCB_FORMAT2:
281 			return setup_apcb10(vcpu, &crycb_s->apcb1,
282 					    crycb_gpa,
283 					    &crycb_h->apcb1);
284 		case CRYCB_FORMAT1:
285 		case CRYCB_FORMAT0:
286 			return setup_apcb00(vcpu,
287 					    (unsigned long *) &crycb_s->apcb0,
288 					    crycb_gpa,
289 					    (unsigned long *) &crycb_h->apcb0);
290 		}
291 	}
292 	return -EINVAL;
293 }
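/*
 * Format compatibility overview (editor's summary of the switch above,
 * for illustration only):
 *
 *	guest 3 crycb	guest 2 crycb	copy function
 *	FORMAT2		FORMAT2		setup_apcb11()
 *	FORMAT1		FORMAT2		setup_apcb10()
 *	FORMAT1		FORMAT1		setup_apcb00()
 *	FORMAT0		FORMAT2		setup_apcb10()
 *	FORMAT0		FORMAT1/0	setup_apcb00()
 *
 * every other combination is rejected with -EINVAL.
 */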
294 
295 /**
296  * shadow_crycb - Create a shadow copy of the crycb block
297  * @vcpu: a pointer to the virtual CPU
298  * @vsie_page: a pointer to internal data used for the vSIE
299  *
300  * Create a shadow copy of the crycb block and setup key wrapping, if
301  * requested for guest 3 and enabled for guest 2.
302  *
303  * We accept format-1 or format-2, but we convert format-1 into format-2
304  * in the shadow CRYCB.
305  * Using format-2 enables the firmware to choose the right format when
306  * scheduling the SIE.
307  * There is nothing to do for format-0.
308  *
309  * This function centralizes the issuing of set_validity_icpt() for all
310  * the subfunctions working on the crycb.
311  *
312  * Returns: - 0 if shadowed or nothing to do
313  *          - > 0 if control has to be given to guest 2
314  */
315 static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
316 {
317 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
318 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
319 	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
320 	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
321 	unsigned long *b1, *b2;
322 	u8 ecb3_flags;
323 	u32 ecd_flags;
324 	int apie_h;
325 	int apie_s;
326 	int key_msk = test_kvm_facility(vcpu->kvm, 76);
327 	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
328 	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
329 	int ret = 0;
330 
331 	scb_s->crycbd = 0;
332 
333 	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
334 	apie_s = apie_h & scb_o->eca;
335 	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
336 		return 0;
337 
338 	if (!crycb_addr)
339 		return set_validity_icpt(scb_s, 0x0039U);
340 
341 	if (fmt_o == CRYCB_FORMAT1)
342 		if ((crycb_addr & PAGE_MASK) !=
343 		    ((crycb_addr + 128) & PAGE_MASK))
344 			return set_validity_icpt(scb_s, 0x003CU);
345 
346 	if (apie_s) {
347 		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
348 				 vcpu->kvm->arch.crypto.crycb,
349 				 fmt_o, fmt_h);
350 		if (ret)
351 			goto end;
352 		scb_s->eca |= scb_o->eca & ECA_APIE;
353 	}
354 
355 	/* we may only allow it if enabled for guest 2 */
356 	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
357 		     (ECB3_AES | ECB3_DEA);
358 	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd &
359 		     (ECD_ECC | ECD_HMAC);
360 	if (!ecb3_flags && !ecd_flags)
361 		goto end;
362 
363 	/* copy only the wrapping keys */
364 	if (read_guest_real(vcpu, crycb_addr + 72,
365 			    vsie_page->crycb.dea_wrapping_key_mask, 56))
366 		return set_validity_icpt(scb_s, 0x0035U);
367 
368 	scb_s->ecb3 |= ecb3_flags;
369 	scb_s->ecd |= ecd_flags;
370 
371 	/* xor both blocks in one run */
372 	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
373 	b2 = (unsigned long *)
374 			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
375 	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
376 	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
377 end:
378 	switch (ret) {
379 	case -EINVAL:
380 		return set_validity_icpt(scb_s, 0x0022U);
381 	case -EFAULT:
382 		return set_validity_icpt(scb_s, 0x0035U);
383 	case -EACCES:
384 		return set_validity_icpt(scb_s, 0x003CU);
385 	}
386 	scb_s->crycbd = (u32)virt_to_phys(&vsie_page->crycb) | CRYCB_FORMAT2;
387 	return 0;
388 }
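/*
 * Sketch of the resulting shadow crycbd (editor's illustration): on
 * success the shadow designation always points at the crycb embedded in
 * the vsie_page and advertises format-2, e.g. for a vsie_page crycb at
 * host physical address 0x12345700:
 *
 *	scb_s->crycbd = 0x12345700 | CRYCB_FORMAT2;
 *
 * The dea_wrapping_key_mask in that crycb is the XOR of the guest 2 and
 * guest 3 wrapping key masks computed by the bitmap_xor() above.
 */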
389 
390 static void shadow_esa(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
391 {
392 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
393 
394 	/* Ensure these bits are indeed turned off */
395 	scb_s->eca &= ~ECA_VX;
396 	scb_s->ecb &= ~(ECB_GS | ECB_TE);
397 	scb_s->ecb3 &= ~ECB3_RI;
398 	scb_s->ecd &= ~ECD_HOSTREGMGMT;
399 }
400 
401 /* shadow (round up/down) the ibc to avoid validity icpt */
402 static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
403 {
404 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
405 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
406 	/* READ_ONCE does not work on bitfields - use a temporary variable */
407 	const uint32_t __new_ibc = scb_o->ibc;
408 	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
409 	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
410 
411 	scb_s->ibc = 0;
412 	/* ibc installed in g2 and requested for g3 */
413 	if (vcpu->kvm->arch.model.ibc && new_ibc) {
414 		scb_s->ibc = new_ibc;
415 		/* take care of the minimum ibc level of the machine */
416 		if (scb_s->ibc < min_ibc)
417 			scb_s->ibc = min_ibc;
418 		/* take care of the maximum ibc level set for the guest */
419 		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
420 			scb_s->ibc = vcpu->kvm->arch.model.ibc;
421 	}
422 }
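/*
 * Worked example (editor's illustration): assume the machine reports a
 * minimum ibc of 0x0901 (sclp.ibc >> 16) and guest 2 runs with a model
 * ibc of 0x0f05.  A guest 3 request of 0x0800 is raised to 0x0901, a
 * request of 0x0fff is lowered to 0x0f05, and a request of 0x0a00 is
 * passed through unchanged.  If guest 2 has no ibc or guest 3 requests
 * none, the shadow ibc stays 0.
 */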
423 
424 /* unshadow the scb, copying parameters back to the real scb */
425 static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
426 {
427 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
428 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
429 
430 	/* interception */
431 	scb_o->icptcode = scb_s->icptcode;
432 	scb_o->icptstatus = scb_s->icptstatus;
433 	scb_o->ipa = scb_s->ipa;
434 	scb_o->ipb = scb_s->ipb;
435 	scb_o->gbea = scb_s->gbea;
436 
437 	/* timer */
438 	scb_o->cputm = scb_s->cputm;
439 	scb_o->ckc = scb_s->ckc;
440 	scb_o->todpr = scb_s->todpr;
441 
442 	/* guest state */
443 	scb_o->gpsw = scb_s->gpsw;
444 	scb_o->gg14 = scb_s->gg14;
445 	scb_o->gg15 = scb_s->gg15;
446 	memcpy(scb_o->gcr, scb_s->gcr, 128);
447 	scb_o->pp = scb_s->pp;
448 
449 	/* branch prediction */
450 	if (test_kvm_facility(vcpu->kvm, 82)) {
451 		scb_o->fpf &= ~FPF_BPBC;
452 		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
453 	}
454 
455 	/* interrupt intercept */
456 	switch (scb_s->icptcode) {
457 	case ICPT_PROGI:
458 	case ICPT_INSTPROGI:
459 	case ICPT_EXTINT:
460 		memcpy((void *)((u64)scb_o + 0xc0),
461 		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
462 		break;
463 	}
464 
465 	if (scb_s->ihcpu != 0xffffU)
466 		scb_o->ihcpu = scb_s->ihcpu;
467 }
468 
469 /*
470  * Setup the shadow scb by copying and checking the relevant parts of the g2
471  * provided scb.
472  *
473  * Returns: - 0 if the scb has been shadowed
474  *          - > 0 if control has to be given to guest 2
475  */
476 static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
477 {
478 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
479 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
480 	/* READ_ONCE does not work on bitfields - use a temporary variable */
481 	const uint32_t __new_prefix = scb_o->prefix;
482 	uint32_t new_prefix = READ_ONCE(__new_prefix);
483 	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
484 	bool had_tx = scb_s->ecb & ECB_TE;
485 	unsigned long new_mso = 0;
486 	int rc;
487 
488 	/* make sure we don't have any leftovers when reusing the scb */
489 	scb_s->icptcode = 0;
490 	scb_s->eca = 0;
491 	scb_s->ecb = 0;
492 	scb_s->ecb2 = 0;
493 	scb_s->ecb3 = 0;
494 	scb_s->ecd = 0;
495 	scb_s->fac = 0;
496 	scb_s->fpf = 0;
497 
498 	rc = prepare_cpuflags(vcpu, vsie_page);
499 	if (rc)
500 		goto out;
501 
502 	/* timer */
503 	scb_s->cputm = scb_o->cputm;
504 	scb_s->ckc = scb_o->ckc;
505 	scb_s->todpr = scb_o->todpr;
506 	scb_s->epoch = scb_o->epoch;
507 
508 	/* guest state */
509 	scb_s->gpsw = scb_o->gpsw;
510 	scb_s->gg14 = scb_o->gg14;
511 	scb_s->gg15 = scb_o->gg15;
512 	memcpy(scb_s->gcr, scb_o->gcr, 128);
513 	scb_s->pp = scb_o->pp;
514 
515 	/* interception / execution handling */
516 	scb_s->gbea = scb_o->gbea;
517 	scb_s->lctl = scb_o->lctl;
518 	scb_s->svcc = scb_o->svcc;
519 	scb_s->ictl = scb_o->ictl;
520 	/*
521 	 * SKEY handling functions can't deal with false setting of PTE invalid
522 	 * bits. Therefore we cannot provide interpretation and would later
523 	 * have to provide own emulation handlers.
524 	 */
525 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
526 		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
527 
528 	scb_s->icpua = scb_o->icpua;
529 
530 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_ZARCH))
531 		new_prefix &= GUEST_PREFIX_MASK_ESA;
532 	else
533 		new_prefix &= GUEST_PREFIX_MASK_ZARCH;
534 
535 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
536 		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
537 	/* if the hva of the prefix changes, we have to remap the prefix */
538 	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
539 		prefix_unmapped(vsie_page);
540 	 /* SIE will do mso/msl validity and exception checks for us */
541 	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
542 	scb_s->mso = new_mso;
543 	scb_s->prefix = new_prefix;
544 
545 	/* We have to definitely flush the tlb if this scb never ran */
546 	if (scb_s->ihcpu != 0xffffU)
547 		scb_s->ihcpu = scb_o->ihcpu;
548 
549 	/* MVPG and Protection Exception Interpretation are always available */
550 	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
551 	/* Host-protection-interruption introduced with ESOP */
552 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
553 		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
554 	/*
555 	 * CPU Topology
556 	 * This facility only uses the utility field of the SCA and none of
557 	 * the cpu entries that are problematic with the other interpretation
558 	 * facilities so we can pass it through
559 	 */
560 	if (test_kvm_facility(vcpu->kvm, 11))
561 		scb_s->ecb |= scb_o->ecb & ECB_PTF;
562 	/* transactional execution */
563 	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
564 		/* remap the prefix if tx is toggled on */
565 		if (!had_tx)
566 			prefix_unmapped(vsie_page);
567 		scb_s->ecb |= ECB_TE;
568 	}
569 	/* specification exception interpretation */
570 	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
571 	/* branch prediction */
572 	if (test_kvm_facility(vcpu->kvm, 82))
573 		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
574 	/* SIMD */
575 	if (test_kvm_facility(vcpu->kvm, 129)) {
576 		scb_s->eca |= scb_o->eca & ECA_VX;
577 		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
578 	}
579 	/* Run-time-Instrumentation */
580 	if (test_kvm_facility(vcpu->kvm, 64))
581 		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
582 	/* Instruction Execution Prevention */
583 	if (test_kvm_facility(vcpu->kvm, 130))
584 		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
585 	/* Guarded Storage */
586 	if (test_kvm_facility(vcpu->kvm, 133)) {
587 		scb_s->ecb |= scb_o->ecb & ECB_GS;
588 		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
589 	}
590 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
591 		scb_s->eca |= scb_o->eca & ECA_SII;
592 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
593 		scb_s->eca |= scb_o->eca & ECA_IB;
594 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
595 		scb_s->eca |= scb_o->eca & ECA_CEI;
596 	/* Epoch Extension */
597 	if (test_kvm_facility(vcpu->kvm, 139)) {
598 		scb_s->ecd |= scb_o->ecd & ECD_MEF;
599 		scb_s->epdx = scb_o->epdx;
600 	}
601 
602 	/* etoken */
603 	if (test_kvm_facility(vcpu->kvm, 156))
604 		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
605 
606 	scb_s->hpid = HPID_VSIE;
607 	scb_s->cpnc = scb_o->cpnc;
608 
609 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_ZARCH))
610 		shadow_esa(vcpu, vsie_page);
611 
612 	prepare_ibc(vcpu, vsie_page);
613 	rc = shadow_crycb(vcpu, vsie_page);
614 out:
615 	if (rc)
616 		unshadow_scb(vcpu, vsie_page);
617 	return rc;
618 }
619 
620 void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, gpa_t start, gpa_t end)
621 {
622 	struct vsie_page *cur, *next;
623 	unsigned long prefix;
624 
625 	KVM_BUG_ON(!test_bit(GMAP_FLAG_SHADOW, &gmap->flags), gmap->kvm);
626 	/*
627 	 * Only new shadow blocks are added to the list during runtime,
628 	 * therefore we can safely reference them all the time.
629 	 */
630 	list_for_each_entry_safe(cur, next, &gmap->scb_users, gmap_cache.list) {
631 		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
632 		/* with mso/msl, the prefix lies at an offset */
633 		prefix += cur->scb_s.mso;
634 		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
635 			prefix_unmapped_sync(cur);
636 	}
637 }
638 
639 /*
640  * Map the first prefix page and if tx is enabled also the second prefix page.
641  *
642  * The prefix will be protected, a gmap notifier will inform about unmaps.
643  * The shadow scb must not be executed until the prefix is remapped, this is
644  * guaranteed by properly handling PROG_REQUEST.
645  *
646  * Returns: - 0 if successfully mapped or already mapped
647  *          - > 0 if control has to be given to guest 2
648  *          - -EAGAIN if the caller can retry immediately
649  *          - -ENOMEM if out of memory
650  */
651 static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
652 {
653 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
654 	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
655 	int rc;
656 
657 	if (prefix_is_mapped(vsie_page))
658 		return 0;
659 
660 	/* mark it as mapped so we can catch any concurrent unmappers */
661 	prefix_mapped(vsie_page);
662 
663 	/* with mso/msl, the prefix lies at offset *mso* */
664 	prefix += scb_s->mso;
665 
666 	rc = gaccess_shadow_fault(vcpu, sg, prefix, NULL, true);
667 	if (!rc && (scb_s->ecb & ECB_TE))
668 		rc = gaccess_shadow_fault(vcpu, sg, prefix + PAGE_SIZE, NULL, true);
669 	/*
670 	 * We don't have to mprotect, we will be called for all unshadows.
671 	 * SIE will detect if protection applies and trigger a validity.
672 	 */
673 	if (rc)
674 		prefix_unmapped(vsie_page);
675 	if (rc > 0 || rc == -EFAULT)
676 		rc = set_validity_icpt(scb_s, 0x0037U);
677 	return rc;
678 }
679 
680 /*
681  * Pin the guest page given by gpa and set hpa to the pinned host address.
682  * Will always be pinned writable.
683  *
684  * Returns: - 0 on success
685  *          - -EINVAL if the gpa is not valid guest storage
686  */
687 static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
688 {
689 	struct page *page;
690 
691 	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
692 	if (!page)
693 		return -EINVAL;
694 	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
695 	return 0;
696 }
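/*
 * Example (editor's illustration): pinning gpa 0x00012345678 returns the
 * host physical address of the backing page plus the in-page offset, i.e.
 *
 *	*hpa = page_to_phys(page) + 0x678;
 *
 * so callers can store byte-granular designations (e.g. scaol/scaoh)
 * directly in the shadow scb.
 */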
697 
698 /* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
699 static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
700 {
701 	kvm_release_page_dirty(pfn_to_page(hpa >> PAGE_SHIFT));
702 	/* mark the page always as dirty for migration */
703 	mark_page_dirty(kvm, gpa_to_gfn(gpa));
704 }
705 
706 /* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
707 static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
708 {
709 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
710 	hpa_t hpa;
711 
712 	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
713 	if (hpa) {
714 		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
715 		vsie_page->sca_gpa = 0;
716 		scb_s->scaol = 0;
717 		scb_s->scaoh = 0;
718 	}
719 
720 	hpa = scb_s->itdba;
721 	if (hpa) {
722 		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
723 		vsie_page->itdba_gpa = 0;
724 		scb_s->itdba = 0;
725 	}
726 
727 	hpa = scb_s->gvrd;
728 	if (hpa) {
729 		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
730 		vsie_page->gvrd_gpa = 0;
731 		scb_s->gvrd = 0;
732 	}
733 
734 	hpa = scb_s->riccbd;
735 	if (hpa) {
736 		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
737 		vsie_page->riccbd_gpa = 0;
738 		scb_s->riccbd = 0;
739 	}
740 
741 	hpa = scb_s->sdnxo;
742 	if (hpa) {
743 		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
744 		vsie_page->sdnx_gpa = 0;
745 		scb_s->sdnxo = 0;
746 	}
747 }
748 
749 /*
750  * Instead of shadowing some blocks, we can simply forward them because the
751  * addresses in the scb are 64 bit long.
752  *
753  * This works as long as the data lies in one page. If blocks ever exceed one
754  * page, we have to fall back to shadowing.
755  *
756  * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
757  * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
758  *
759  * Returns: - 0 if all blocks were pinned.
760  *          - > 0 if control has to be given to guest 2
761  *          - -ENOMEM if out of memory
762  */
763 static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
764 {
765 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
766 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
767 	hpa_t hpa;
768 	gpa_t gpa;
769 	int rc = 0;
770 
771 	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
772 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
773 		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
774 	if (gpa) {
775 		if (gpa < 2 * PAGE_SIZE)
776 			rc = set_validity_icpt(scb_s, 0x0038U);
777 		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
778 			rc = set_validity_icpt(scb_s, 0x0011U);
779 		else if ((gpa & PAGE_MASK) !=
780 			 ((gpa + offsetof(struct bsca_block, cpu[0]) - 1) & PAGE_MASK))
781 			rc = set_validity_icpt(scb_s, 0x003bU);
782 		if (!rc) {
783 			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
784 			if (rc)
785 				rc = set_validity_icpt(scb_s, 0x0034U);
786 		}
787 		if (rc)
788 			goto unpin;
789 		vsie_page->sca_gpa = gpa;
790 		scb_s->scaoh = (u32)((u64)hpa >> 32);
791 		scb_s->scaol = (u32)(u64)hpa;
792 	}
793 
794 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
795 	if (gpa && (scb_s->ecb & ECB_TE)) {
796 		if (gpa < 2 * PAGE_SIZE) {
797 			rc = set_validity_icpt(scb_s, 0x0080U);
798 			goto unpin;
799 		}
800 		/* 256 bytes cannot cross page boundaries */
801 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
802 		if (rc) {
803 			rc = set_validity_icpt(scb_s, 0x0080U);
804 			goto unpin;
805 		}
806 		vsie_page->itdba_gpa = gpa;
807 		scb_s->itdba = hpa;
808 	}
809 
810 	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
811 	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
812 		if (gpa < 2 * PAGE_SIZE) {
813 			rc = set_validity_icpt(scb_s, 0x1310U);
814 			goto unpin;
815 		}
816 		/*
817 		 * 512 bytes of vector registers cannot cross page boundaries;
818 		 * if this block gets bigger, we have to shadow it.
819 		 */
820 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
821 		if (rc) {
822 			rc = set_validity_icpt(scb_s, 0x1310U);
823 			goto unpin;
824 		}
825 		vsie_page->gvrd_gpa = gpa;
826 		scb_s->gvrd = hpa;
827 	}
828 
829 	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
830 	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
831 		if (gpa < 2 * PAGE_SIZE) {
832 			rc = set_validity_icpt(scb_s, 0x0043U);
833 			goto unpin;
834 		}
835 		/* 64 bytes cannot cross page boundaries */
836 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
837 		if (rc) {
838 			rc = set_validity_icpt(scb_s, 0x0043U);
839 			goto unpin;
840 		}
841 		/* Validity 0x0044 will be checked by SIE */
842 		vsie_page->riccbd_gpa = gpa;
843 		scb_s->riccbd = hpa;
844 	}
845 	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
846 	    (scb_s->ecd & ECD_ETOKENF)) {
847 		unsigned long sdnxc;
848 
849 		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
850 		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
851 		if (!gpa || gpa < 2 * PAGE_SIZE) {
852 			rc = set_validity_icpt(scb_s, 0x10b0U);
853 			goto unpin;
854 		}
855 		if (sdnxc < 6 || sdnxc > 12) {
856 			rc = set_validity_icpt(scb_s, 0x10b1U);
857 			goto unpin;
858 		}
859 		if (gpa & ((1 << sdnxc) - 1)) {
860 			rc = set_validity_icpt(scb_s, 0x10b2U);
861 			goto unpin;
862 		}
863 		/* Due to alignment rules (checked above) this cannot
864 		 * cross page boundaries
865 		 */
866 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
867 		if (rc) {
868 			rc = set_validity_icpt(scb_s, 0x10b0U);
869 			goto unpin;
870 		}
871 		vsie_page->sdnx_gpa = gpa;
872 		scb_s->sdnxo = hpa | sdnxc;
873 	}
874 	return 0;
875 unpin:
876 	unpin_blocks(vcpu, vsie_page);
877 	return rc;
878 }
879 
880 /* unpin the scb provided by guest 2, marking it as dirty */
881 static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
882 		      gpa_t gpa)
883 {
884 	hpa_t hpa = virt_to_phys(vsie_page->scb_o);
885 
886 	if (hpa)
887 		unpin_guest_page(vcpu->kvm, gpa, hpa);
888 	vsie_page->scb_o = NULL;
889 }
890 
891 /*
892  * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
893  *
894  * Returns: - 0 if the scb was pinned.
895  *          - > 0 if control has to be given to guest 2
896  */
897 static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
898 		   gpa_t gpa)
899 {
900 	hpa_t hpa;
901 	int rc;
902 
903 	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
904 	if (rc) {
905 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
906 		WARN_ON_ONCE(rc);
907 		return 1;
908 	}
909 	vsie_page->scb_o = phys_to_virt(hpa);
910 	return 0;
911 }
912 
913 /*
914  * Inject a fault into guest 2.
915  *
916  * Returns: - > 0 if control has to be given to guest 2
917  *            < 0 if an error occurred during injection.
918  */
919 static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
920 			bool write_flag)
921 {
922 	struct kvm_s390_pgm_info pgm = {
923 		.code = code,
924 		.trans_exc_code =
925 			/* 0-51: virtual address */
926 			(vaddr & 0xfffffffffffff000UL) |
927 			/* 52-53: store / fetch */
928 			(((unsigned int) !write_flag) + 1) << 10,
929 			/* 62-63: asce id (always primary == 0) */
930 		.exc_access_id = 0, /* always primary */
931 		.op_access_id = 0, /* not MVPG */
932 	};
933 	int rc;
934 
935 	if (code == PGM_PROTECTION)
936 		pgm.trans_exc_code |= 0x4UL;
937 
938 	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
939 	return rc ? rc : 1;
940 }
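/*
 * Worked example (editor's illustration): a protection exception on a
 * write to guest 2 virtual address 0x12345678 yields
 *
 *	pgm.code           = PGM_PROTECTION
 *	pgm.trans_exc_code = 0x12345000 (bits 0-51: page address)
 *	                   | 0x400      (bits 52-53: 01 == store)
 *	                   | 0x4        (protection indication)
 *
 * For a fetch access, bits 52-53 would be 10 (0x800) instead.
 */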
941 
942 /*
943  * Handle a fault during vsie execution on a gmap shadow.
944  *
945  * Returns: - 0 if the fault was resolved
946  *          - > 0 if control has to be given to guest 2
947  *          - < 0 if an error occurred
948  */
949 static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
950 {
951 	bool wr = kvm_s390_cur_gmap_fault_is_write();
952 	int rc;
953 
954 	if ((current->thread.gmap_int_code & PGM_INT_CODE_MASK) == PGM_PROTECTION)
955 		/* we can directly forward all protection exceptions */
956 		return inject_fault(vcpu, PGM_PROTECTION,
957 				    current->thread.gmap_teid.addr * PAGE_SIZE, 1);
958 
959 	rc = gaccess_shadow_fault(vcpu, sg, current->thread.gmap_teid.addr * PAGE_SIZE, NULL, wr);
960 	if (rc > 0) {
961 		rc = inject_fault(vcpu, rc,
962 				  current->thread.gmap_teid.addr * PAGE_SIZE, wr);
963 		if (rc >= 0)
964 			vsie_page->fault_addr = current->thread.gmap_teid.addr * PAGE_SIZE;
965 	}
966 	return rc;
967 }
968 
969 /*
970  * Retry the previous fault that required guest 2 intervention. This avoids
971  * one superfluous SIE re-entry and direct exit.
972  *
973  * Will ignore any errors. The next SIE fault will do proper fault handling.
974  */
975 static void handle_last_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
976 {
977 	if (vsie_page->fault_addr)
978 		gaccess_shadow_fault(vcpu, sg, vsie_page->fault_addr, NULL, true);
979 	vsie_page->fault_addr = 0;
980 }
981 
982 static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
983 {
984 	vsie_page->scb_s.icptcode = 0;
985 }
986 
987 /* rewind the psw and clear the vsie icpt, so we can retry execution */
988 static void retry_vsie_icpt(struct vsie_page *vsie_page)
989 {
990 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
991 	int ilen = insn_length(scb_s->ipa >> 8);
992 
993 	/* take care of EXECUTE instructions */
994 	if (scb_s->icptstatus & 1) {
995 		ilen = (scb_s->icptstatus >> 4) & 0x6;
996 		if (!ilen)
997 			ilen = 4;
998 	}
999 	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
1000 	clear_vsie_icpt(vsie_page);
1001 }
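/*
 * Example (editor's illustration): for an intercepted 4-byte instruction
 * such as STFLE (ipa 0xb2b0), insn_length(0xb2) yields 4 and the PSW is
 * rewound by 4 bytes.  If the instruction was the target of an EXECUTE
 * (bit 0 of icptstatus set), the length is taken from the ILC encoded in
 * icptstatus ((icptstatus >> 4) & 0x6) instead, with 0 meaning 4 bytes.
 */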
1002 
1003 /*
1004  * Try to shadow + enable the guest 2 provided facility list.
1005  * Retry instruction execution if enabled for and provided by guest 2.
1006  *
1007  * Returns: - 0 if handled (retry or guest 2 icpt)
1008  *          - > 0 if control has to be given to guest 2
1009  */
1010 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1011 {
1012 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1013 	__u32 fac = READ_ONCE(vsie_page->scb_o->fac);
1014 
1015 	/*
1016 	 * Alternate-STFLE-Interpretive-Execution facilities are not supported
1017 	 * -> format-0 flcb
1018 	 */
1019 	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
1020 		retry_vsie_icpt(vsie_page);
1021 		/*
1022 		 * The facility list origin (FLO) is in bits 1 - 28 of the FLD
1023 		 * so we need to mask here before reading.
1024 		 */
1025 		fac = fac & 0x7ffffff8U;
1026 		/*
1027 		 * format-0 -> size of nested guest's facility list == guest's size
1028 		 * guest's size == host's size, since STFLE is interpretatively executed
1029 		 * using a format-0 for the guest, too.
1030 		 */
1031 		if (read_guest_real(vcpu, fac, &vsie_page->fac,
1032 				    stfle_size() * sizeof(u64)))
1033 			return set_validity_icpt(scb_s, 0x1090U);
1034 		scb_s->fac = (u32)virt_to_phys(&vsie_page->fac);
1035 	}
1036 	return 0;
1037 }
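/*
 * Example (editor's illustration): if guest 3 programs a fac designation
 * of 0x00057fc3 into its scb, masking with 0x7ffffff8 strips the reserved
 * low bits and yields the facility list origin 0x00057fc0, from which
 * stfle_size() * 8 bytes are copied into vsie_page->fac and the shadow
 * scb->fac is pointed at that copy.
 */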
1038 
1039 /*
1040  * Get a register for a nested guest.
1041  * @vcpu the vcpu of the guest
1042  * @vsie_page the vsie_page for the nested guest
1043  * @reg the register number, the upper 4 bits are ignored.
1044  * returns: the value of the register.
1045  */
1046 static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
1047 {
1048 	/* no need to validate the parameter and/or perform error handling */
1049 	reg &= 0xf;
1050 	switch (reg) {
1051 	case 15:
1052 		return vsie_page->scb_s.gg15;
1053 	case 14:
1054 		return vsie_page->scb_s.gg14;
1055 	default:
1056 		return vcpu->run->s.regs.gprs[reg];
1057 	}
1058 }
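/*
 * Usage note (editor's illustration): vsie_handle_mvpg() below passes
 * scb_s->ipb >> 20 for R1 and scb_s->ipb >> 16 for R2; only the low four
 * bits matter.  Registers 14 and 15 are read from the shadow scb because
 * they live there while the vSIE is running; all other registers come
 * from the guest register save area in vcpu->run.
 */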
1059 
1060 static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
1061 {
1062 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1063 	unsigned long src, dest, mask, prefix;
1064 	u64 *pei_block = &vsie_page->scb_o->mcic;
1065 	union mvpg_pei pei_dest, pei_src;
1066 	int edat, rc_dest, rc_src;
1067 	union ctlreg0 cr0;
1068 
1069 	cr0.val = vcpu->arch.sie_block->gcr[0];
1070 	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1071 	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
1072 	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
1073 
1074 	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
1075 	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
1076 	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
1077 	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
1078 
1079 	rc_dest = gaccess_shadow_fault(vcpu, sg, dest, &pei_dest, true);
1080 	rc_src = gaccess_shadow_fault(vcpu, sg, src, &pei_src, false);
1081 	/*
1082 	 * Either everything went well, or something non-critical went wrong
1083 	 * e.g. because of a race. In either case, simply retry.
1084 	 */
1085 	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
1086 		retry_vsie_icpt(vsie_page);
1087 		return -EAGAIN;
1088 	}
1089 	/* Something more serious went wrong, propagate the error */
1090 	if (rc_dest < 0)
1091 		return rc_dest;
1092 	if (rc_src < 0)
1093 		return rc_src;
1094 
1095 	/* The only possible suppressing exception: just deliver it */
1096 	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
1097 		clear_vsie_icpt(vsie_page);
1098 		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
1099 		WARN_ON_ONCE(rc_dest);
1100 		return 1;
1101 	}
1102 
1103 	/*
1104 	 * Forward the PEI intercept to the guest if it was a page fault, or
1105 	 * also for segment and region table faults if EDAT applies.
1106 	 */
1107 	if (edat) {
1108 		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
1109 		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
1110 	} else {
1111 		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
1112 		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
1113 	}
1114 	if (!rc_dest && !rc_src) {
1115 		pei_block[0] = pei_dest.val;
1116 		pei_block[1] = pei_src.val;
1117 		return 1;
1118 	}
1119 
1120 	retry_vsie_icpt(vsie_page);
1121 
1122 	/*
1123 	 * The host has edat, and the guest does not, or it was an ASCE type
1124 	 * exception. The host needs to inject the appropriate DAT interrupts
1125 	 * into the guest.
1126 	 */
1127 	if (rc_dest)
1128 		return inject_fault(vcpu, rc_dest, dest, 1);
1129 	return inject_fault(vcpu, rc_src, src, 0);
1130 }
1131 
1132 /*
1133  * Run the vsie on a shadow scb and a shadow gmap, without any further
1134  * sanity checks, handling SIE faults.
1135  *
1136  * Returns: - 0 everything went fine
1137  *          - > 0 if control has to be given to guest 2
1138  *          - < 0 if an error occurred
1139  */
1140 static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struct gmap *sg)
1141 	__releases(vcpu->kvm->srcu)
1142 	__acquires(vcpu->kvm->srcu)
1143 {
1144 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1145 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
1146 	unsigned long sie_return = SIE64_RETURN_NORMAL;
1147 	int guest_bp_isolation;
1148 	int rc = 0;
1149 
1150 	handle_last_fault(vcpu, vsie_page, sg);
1151 
1152 	kvm_vcpu_srcu_read_unlock(vcpu);
1153 
1154 	/* save current guest state of bp isolation override */
1155 	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
1156 
1157 	/*
1158 	 * The guest is running with BPBC, so we have to force it on for our
1159 	 * nested guest. This is done by enabling BPBC globally, so the BPBC
1160 	 * control in the SCB (which the nested guest can modify) is simply
1161 	 * ignored.
1162 	 */
1163 	if (test_kvm_facility(vcpu->kvm, 82) &&
1164 	    vcpu->arch.sie_block->fpf & FPF_BPBC)
1165 		set_thread_flag(TIF_ISOLATE_BP_GUEST);
1166 
1167 	/*
1168 	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
1169 	 * and VCPU requests also hinder the vSIE from running and lead
1170 	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
1171 	 * also kick the vSIE.
1172 	 */
1173 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
1174 	current->thread.gmap_int_code = 0;
1175 	barrier();
1176 	if (!kvm_s390_vcpu_sie_inhibited(vcpu)) {
1177 xfer_to_guest_mode_check:
1178 		local_irq_disable();
1179 		xfer_to_guest_mode_prepare();
1180 		if (xfer_to_guest_mode_work_pending()) {
1181 			local_irq_enable();
1182 			rc = kvm_xfer_to_guest_mode_handle_work(vcpu);
1183 			if (rc)
1184 				goto skip_sie;
1185 			goto xfer_to_guest_mode_check;
1186 		}
1187 		guest_timing_enter_irqoff();
1188 		sie_return = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
1189 		guest_timing_exit_irqoff();
1190 		local_irq_enable();
1191 	}
1192 
1193 skip_sie:
1194 	barrier();
1195 	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
1196 
1197 	/* restore guest state for bp isolation override */
1198 	if (!guest_bp_isolation)
1199 		clear_thread_flag(TIF_ISOLATE_BP_GUEST);
1200 
1201 	kvm_vcpu_srcu_read_lock(vcpu);
1202 
1203 	if (sie_return == SIE64_RETURN_MCCK) {
1204 		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
1205 		return 0;
1206 	}
1207 
1208 	WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
1209 
1210 	if (rc > 0)
1211 		rc = 0; /* we could still have an icpt */
1212 	else if (current->thread.gmap_int_code)
1213 		return handle_fault(vcpu, vsie_page, sg);
1214 
1215 	switch (scb_s->icptcode) {
1216 	case ICPT_INST:
1217 		if (scb_s->ipa == 0xb2b0)
1218 			rc = handle_stfle(vcpu, vsie_page);
1219 		break;
1220 	case ICPT_STOP:
1221 		/* stop not requested by g2 - must have been a kick */
1222 		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
1223 			clear_vsie_icpt(vsie_page);
1224 		break;
1225 	case ICPT_VALIDITY:
1226 		if ((scb_s->ipa & 0xf000) != 0xf000)
1227 			scb_s->ipa += 0x1000;
1228 		break;
1229 	case ICPT_PARTEXEC:
1230 		if (scb_s->ipa == 0xb254)
1231 			rc = vsie_handle_mvpg(vcpu, vsie_page, sg);
1232 		break;
1233 	}
1234 	return rc;
1235 }
1236 
1237 static void release_gmap_shadow(struct vsie_page *vsie_page)
1238 {
1239 	struct gmap *gmap = vsie_page->gmap_cache.gmap;
1240 
1241 	lockdep_assert_held(&gmap->kvm->arch.gmap->children_lock);
1242 
1243 	list_del(&vsie_page->gmap_cache.list);
1244 	vsie_page->gmap_cache.gmap = NULL;
1245 	prefix_unmapped(vsie_page);
1246 
1247 	if (list_empty(&gmap->scb_users)) {
1248 		gmap_remove_child(gmap);
1249 		gmap_put(gmap);
1250 	}
1251 }
1252 
1253 static struct gmap *acquire_gmap_shadow(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1254 {
1255 	union ctlreg0 cr0;
1256 	struct gmap *gmap;
1257 	union asce asce;
1258 	int edat;
1259 
1260 	asce.val = vcpu->arch.sie_block->gcr[1];
1261 	cr0.val = vcpu->arch.sie_block->gcr[0];
1262 	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
1263 	edat += edat && test_kvm_facility(vcpu->kvm, 78);
1264 
1265 	scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock) {
1266 		gmap = vsie_page->gmap_cache.gmap;
1267 		if (gmap) {
1268 			/*
1269 			 * ASCE or EDAT could have changed since last icpt, or the gmap
1270 			 * we're holding has been unshadowed. If the gmap is still valid,
1271 			 * we can safely reuse it.
1272 			 */
1273 			if (gmap_is_shadow_valid(gmap, asce, edat)) {
1274 				vcpu->kvm->stat.gmap_shadow_reuse++;
1275 				gmap_get(gmap);
1276 				return gmap;
1277 			}
1278 			/* release the old shadow and mark the prefix as unmapped */
1279 			release_gmap_shadow(vsie_page);
1280 		}
1281 	}
1282 again:
1283 	gmap = gmap_create_shadow(vcpu->arch.mc, vcpu->kvm->arch.gmap, asce, edat);
1284 	if (IS_ERR(gmap))
1285 		return gmap;
1286 	scoped_guard(spinlock, &vcpu->kvm->arch.gmap->children_lock) {
1287 		/* unlikely race condition, remove the previous shadow */
1288 		if (vsie_page->gmap_cache.gmap)
1289 			release_gmap_shadow(vsie_page);
1290 		if (!gmap->parent) {
1291 			gmap_put(gmap);
1292 			goto again;
1293 		}
1294 		vcpu->kvm->stat.gmap_shadow_create++;
1295 		list_add(&vsie_page->gmap_cache.list, &gmap->scb_users);
1296 		vsie_page->gmap_cache.gmap = gmap;
1297 		prefix_unmapped(vsie_page);
1298 	}
1299 	return gmap;
1300 }
1301 
1302 /*
1303  * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
1304  */
1305 static void register_shadow_scb(struct kvm_vcpu *vcpu,
1306 				struct vsie_page *vsie_page)
1307 {
1308 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1309 
1310 	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
1311 	/*
1312 	 * External calls have to lead to a kick of the vcpu and
1313 	 * therefore the vsie -> Simulate Wait state.
1314 	 */
1315 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1316 	/*
1317 	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
1318 	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
1319 	 */
1320 	preempt_disable();
1321 	scb_s->epoch += vcpu->kvm->arch.epoch;
1322 
1323 	if (scb_s->ecd & ECD_MEF) {
1324 		scb_s->epdx += vcpu->kvm->arch.epdx;
1325 		if (scb_s->epoch < vcpu->kvm->arch.epoch)
1326 			scb_s->epdx += 1;
1327 	}
1328 
1329 	preempt_enable();
1330 }
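/*
 * Worked example (editor's illustration): guest 3's epoch in the original
 * scb is relative to guest 2, so the guest 2 epoch is added on top.  If
 * that 64-bit addition wraps (scb_s->epoch ends up smaller than
 * vcpu->kvm->arch.epoch), the carry is propagated into the epoch index
 * extension epdx, provided the multiple-epoch facility (ECD_MEF) is in
 * use.
 */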
1331 
1332 /*
1333  * Unregister a shadow scb from a VCPU.
1334  */
1335 static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
1336 {
1337 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1338 	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
1339 }
1340 
1341 /*
1342  * Run the vsie on a shadowed scb, managing the gmap shadow, handling
1343  * prefix pages and faults.
1344  *
1345  * Returns: - 0 if no errors occurred
1346  *          - > 0 if control has to be given to guest 2
1347  *          - -ENOMEM if out of memory
1348  */
1349 static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
1350 {
1351 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
1352 	struct gmap *sg = NULL;
1353 	int rc = 0;
1354 
1355 	while (1) {
1356 		sg = acquire_gmap_shadow(vcpu, vsie_page);
1357 		if (IS_ERR(sg)) {
1358 			rc = PTR_ERR(sg);
1359 			sg = NULL;
1360 		}
1361 		if (!rc)
1362 			rc = map_prefix(vcpu, vsie_page, sg);
1363 		if (!rc) {
1364 			update_intervention_requests(vsie_page);
1365 			rc = do_vsie_run(vcpu, vsie_page, sg);
1366 		}
1367 		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
1368 
1369 		if (rc == -EAGAIN)
1370 			rc = 0;
1371 
1372 		/*
1373 		 * Exit the loop if the guest needs to process the intercept
1374 		 */
1375 		if (rc || scb_s->icptcode)
1376 			break;
1377 
1378 		/*
1379 		 * Exit the loop if the host needs to process an intercept,
1380 		 * but rewind the PSW to re-enter SIE once that's completed
1381 		 * instead of passing a "no action" intercept to the guest.
1382 		 */
1383 		if (kvm_s390_vcpu_has_irq(vcpu, 0) ||
1384 		    kvm_s390_vcpu_sie_inhibited(vcpu)) {
1385 			kvm_s390_rewind_psw(vcpu, 4);
1386 			break;
1387 		}
1388 		if (sg)
1389 			sg = gmap_put(sg);
1390 		cond_resched();
1391 	}
1392 	if (sg)
1393 		sg = gmap_put(sg);
1394 
1395 	if (rc == -EFAULT) {
1396 		/*
1397 	 * Addressing exceptions are always presented as intercepts.
1398 		 * As addressing exceptions are suppressing and our guest 3 PSW
1399 		 * points at the responsible instruction, we have to
1400 		 * forward the PSW and set the ilc. If we can't read guest 3
1401 		 * instruction, we can use an arbitrary ilc. Let's always use
1402 		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
1403 		 * memory. (we could also fake the shadow so the hardware
1404 		 * handles it).
1405 		 */
1406 		scb_s->icptcode = ICPT_PROGI;
1407 		scb_s->iprcc = PGM_ADDRESSING;
1408 		scb_s->pgmilc = 4;
1409 		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
1410 		rc = 1;
1411 	}
1412 	return rc;
1413 }
1414 
1415 /* Try getting a given vsie page, returning "true" on success. */
1416 static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
1417 {
1418 	if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
1419 		return false;
1420 	return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
1421 }
1422 
1423 /* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
1424 static void put_vsie_page(struct vsie_page *vsie_page)
1425 {
1426 	clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
1427 }
1428 
1429 /*
1430  * Get or create a vsie page for a scb address.
1431  *
1432  * Returns: - address of a vsie page (cached or new one)
1433  *          - NULL if the same scb address is already used by another VCPU
1434  *          - ERR_PTR(-ENOMEM) if out of memory
1435  */
1436 static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
1437 {
1438 	struct vsie_page *vsie_page;
1439 	int nr_vcpus;
1440 
1441 	rcu_read_lock();
1442 	vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1443 	rcu_read_unlock();
1444 	if (vsie_page) {
1445 		if (try_get_vsie_page(vsie_page)) {
1446 			if (vsie_page->scb_gpa == addr)
1447 				return vsie_page;
1448 			/*
1449 			 * We raced with someone reusing + putting this vsie
1450 			 * page before we grabbed it.
1451 			 */
1452 			put_vsie_page(vsie_page);
1453 		}
1454 	}
1455 
1456 	/*
1457 	 * We want at least #online_vcpus shadows, so every VCPU can execute
1458 	 * the VSIE in parallel.
1459 	 */
1460 	nr_vcpus = atomic_read(&kvm->online_vcpus);
1461 
1462 	mutex_lock(&kvm->arch.vsie.mutex);
1463 	if (kvm->arch.vsie.page_count < nr_vcpus) {
1464 		vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1465 		if (!vsie_page) {
1466 			mutex_unlock(&kvm->arch.vsie.mutex);
1467 			return ERR_PTR(-ENOMEM);
1468 		}
1469 		__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
1470 		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
1471 		kvm->arch.vsie.page_count++;
1472 	} else {
1473 		/* reuse an existing entry that belongs to nobody */
1474 		while (true) {
1475 			vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1476 			if (try_get_vsie_page(vsie_page))
1477 				break;
1478 			kvm->arch.vsie.next++;
1479 			kvm->arch.vsie.next %= nr_vcpus;
1480 		}
1481 		if (vsie_page->scb_gpa != ULONG_MAX)
1482 			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
1483 					  vsie_page->scb_gpa >> 9);
1484 	}
1485 	/* Mark it as invalid until it resides in the tree. */
1486 	vsie_page->scb_gpa = ULONG_MAX;
1487 
1488 	/* Double use of the same address or allocation failure. */
1489 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, vsie_page)) {
1490 		put_vsie_page(vsie_page);
1491 		mutex_unlock(&kvm->arch.vsie.mutex);
1492 		return NULL;
1493 	}
1494 	vsie_page->scb_gpa = addr;
1495 	mutex_unlock(&kvm->arch.vsie.mutex);
1496 
1497 	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
1498 	if (vsie_page->gmap_cache.gmap) {
1499 		scoped_guard(spinlock, &kvm->arch.gmap->children_lock)
1500 			if (vsie_page->gmap_cache.gmap)
1501 				release_gmap_shadow(vsie_page);
1502 	}
1503 	prefix_unmapped(vsie_page);
1504 	vsie_page->fault_addr = 0;
1505 	vsie_page->scb_s.ihcpu = 0xffffU;
1506 	return vsie_page;
1507 }
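/*
 * Example (editor's illustration): the radix tree is keyed by the scb
 * address shifted right by 9 because SIE control blocks are 512-byte
 * aligned; an scb at guest physical address 0x5200 is therefore tracked
 * under key 0x29.  If the insert collides with an entry that another
 * VCPU is still using, get_vsie_page() returns NULL, which the caller
 * treats as "double use of the same scb".
 */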
1508 
1509 int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
1510 {
1511 	struct vsie_page *vsie_page;
1512 	unsigned long scb_addr;
1513 	int rc;
1514 
1515 	vcpu->stat.instruction_sie++;
1516 	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
1517 		return -EOPNOTSUPP;
1518 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1519 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1520 
1521 	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
1522 	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
1523 
1524 	/* 512 byte alignment */
1525 	if (unlikely(scb_addr & 0x1ffUL))
1526 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1527 
1528 	if (kvm_s390_vcpu_has_irq(vcpu, 0) || kvm_s390_vcpu_sie_inhibited(vcpu)) {
1529 		kvm_s390_rewind_psw(vcpu, 4);
1530 		return 0;
1531 	}
1532 
1533 	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
1534 	if (IS_ERR(vsie_page)) {
1535 		return PTR_ERR(vsie_page);
1536 	} else if (!vsie_page) {
1537 		/* double use of sie control block - simply do nothing */
1538 		kvm_s390_rewind_psw(vcpu, 4);
1539 		return 0;
1540 	}
1541 
1542 	rc = pin_scb(vcpu, vsie_page, scb_addr);
1543 	if (rc)
1544 		goto out_put;
1545 	rc = shadow_scb(vcpu, vsie_page);
1546 	if (rc)
1547 		goto out_unpin_scb;
1548 	rc = pin_blocks(vcpu, vsie_page);
1549 	if (rc)
1550 		goto out_unshadow;
1551 	register_shadow_scb(vcpu, vsie_page);
1552 	rc = vsie_run(vcpu, vsie_page);
1553 	unregister_shadow_scb(vcpu);
1554 	unpin_blocks(vcpu, vsie_page);
1555 out_unshadow:
1556 	unshadow_scb(vcpu, vsie_page);
1557 out_unpin_scb:
1558 	unpin_scb(vcpu, vsie_page, scb_addr);
1559 out_put:
1560 	put_vsie_page(vsie_page);
1561 
1562 	return rc < 0 ? rc : 0;
1563 }
1564 
1565 /* Init the vsie data structures. To be called when a vm is initialized. */
1566 void kvm_s390_vsie_init(struct kvm *kvm)
1567 {
1568 	mutex_init(&kvm->arch.vsie.mutex);
1569 	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
1570 }
1571 
1572 /* Destroy the vsie data structures. To be called when a vm is destroyed. */
1573 void kvm_s390_vsie_destroy(struct kvm *kvm)
1574 {
1575 	struct vsie_page *vsie_page;
1576 	int i;
1577 
1578 	mutex_lock(&kvm->arch.vsie.mutex);
1579 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1580 		vsie_page = kvm->arch.vsie.pages[i];
1581 		scoped_guard(spinlock, &kvm->arch.gmap->children_lock)
1582 			if (vsie_page->gmap_cache.gmap)
1583 				release_gmap_shadow(vsie_page);
1584 		kvm->arch.vsie.pages[i] = NULL;
1585 		/* free the radix tree entry */
1586 		if (vsie_page->scb_gpa != ULONG_MAX)
1587 			radix_tree_delete(&kvm->arch.vsie.addr_to_page,
1588 					  vsie_page->scb_gpa >> 9);
1589 		free_page((unsigned long)vsie_page);
1590 	}
1591 	kvm->arch.vsie.page_count = 0;
1592 	mutex_unlock(&kvm->arch.vsie.mutex);
1593 }
1594 
1595 void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
1596 {
1597 	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
1598 
1599 	/*
1600 	 * Even if the VCPU lets go of the shadow sie block reference, it is
1601 	 * still valid in the cache. So we can safely kick it.
1602 	 */
1603 	if (scb) {
1604 		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
1605 		if (scb->prog0c & PROG_IN_SIE)
1606 			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
1607 	}
1608 }
1609