// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <linux/io.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

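/*
 * The instructions of the runtime-instrumentation facility occupy the
 * low nibble values 0x0 - 0x4 of this opcode group; everything else is
 * not handled here.
 */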
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

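/*
 * Guarded storage is enabled lazily: the first guest use is intercepted,
 * the handler loads the guest's GS control block on the host CPU, flags
 * the SIE block so that further uses are handled by hardware, and then
 * retries the intercepted instruction.
 */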
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	/*
	 * To set the TOD clock the kvm lock must be taken, but the vcpu lock
	 * is already held in handle_set_clock. The usual lock order is the
	 * opposite. As SCK is deprecated and should not be used in several
	 * cases, for example when the multiple epoch facility or TOD clock
	 * steering facility is installed (see Principles of Operation), a
	 * slow path can be used. If the lock can not be taken via try_lock,
	 * the instruction will be retried via -EAGAIN at a later point in
	 * time.
	 */
	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
		kvm_s390_retry_instr(vcpu);
		return -EAGAIN;
	}

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

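	/*
	 * Only the prefix bits are used: the new prefix must be an
	 * 8k-aligned address below 2 GB, everything else in the fetched
	 * word is ignored.
	 */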
	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

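/*
 * Returns 0 if the storage-key operation still has to be emulated here,
 * a negative error code on failure, and -EAGAIN if SIE itself will
 * re-execute the instruction now that key interpretation is enabled.
 */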
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

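/*
 * ISKE, RRBE and SSKE below share the same pattern for resolving the
 * host mapping: take mmap_lock shared, try the key operation, and on a
 * fault let fixup_user_fault() resolve the page writably before
 * retrying.
 */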
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

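/*
 * The m3 field of SSKE is taken from the leftmost nibble of the IPB; the
 * flags below are the architected m3 bits: nonquiescing (NQ), the
 * reference-bit and change-bit update masks (MR, MC) and the
 * multiple-block control (MB).
 */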
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		mmap_read_lock(current->mm);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		mmap_read_unlock(current->mm);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc == -EAGAIN)
			continue;
		if (rc < 0)
			return rc;
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

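/*
 * Instructions that must not execute while the IPTE interlock is held
 * simply wait for the interlock to be released and are then retried.
 */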
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

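/*
 * TPI either stores a two-word I/O interruption code at the supplied
 * address or, with a zero operand address, the full three-word code into
 * the lowcore; cc 1 indicates that a pending interrupt was dequeued.
 */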
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu that issued the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available: return
 * response code 01 to the caller and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instructions are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set;
	 * since we do not set IC.3 (FIII), we currently only intercept the
	 * AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater than 64 or APQI greater than 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 not present for AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		if (!ret) {
			if (vcpu->run->s.regs.gprs[1] & 0x00ff0000)
				kvm_s390_set_psw_cc(vcpu, 3);
			else
				kvm_s390_set_psw_cc(vcpu, 0);
		}
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

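/*
 * A PSW is rejected if any unassigned mask bit is set, if the address
 * exceeds the range of the selected addressing mode, if EA is set
 * without BA (an invalid combination), or if the address is odd.
 */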
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

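/*
 * LPSWEY is the long-displacement form of LPSWE and is only available
 * when facility 193 is installed for the guest.
 */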
static int handle_lpswey(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswey++;

	if (!test_kvm_facility(vcpu->kvm, 193))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

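/*
 * For STSI 3.2.2 the data returned by the underlying hypervisor (if any)
 * is kept and KVM inserts itself as an additional entry at slot 0,
 * shifting the existing VM descriptions down by one.
 */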
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Bail out on forbidden function codes */
	if (fc > 3 && fc != 15)
		goto out_no_data;

	/*
	 * fc 15 is provided only with
	 *   - PTF/CPU topology support through facility 11
	 *   - KVM_CAP_S390_USER_STSI
	 */
	if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) ||
			 !vcpu->kvm->arch.user_stsi))
		goto out_no_data;

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	case 15: /* fc 15 is fully handled in userspace */
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
		return -EREMOTE;
	}
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
		rc = 0;
	} else {
		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	}
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

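/*
 * Layout of the PFMF first-operand register: reserved bits that must be
 * zero, the frame-management function controls (SK, CF, UI, FSC, NQ, MR,
 * MC) and the storage key to be set.
 */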
#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/*
		 * only support 2G frame size if EDAT2 is available and we are
		 * not in 24-bit addressing mode
		 */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

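/*
 * Without migration mode, handling ESSA only requires (lazily) enabling
 * CMMA and letting SIE re-execute the instruction; in migration mode the
 * instruction is emulated via __do_essa() so that the CMMA state changes
 * can be tracked.
 */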
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						     : ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			mmap_write_lock(vcpu->kvm->mm);
			vcpu->kvm->mm->context.uses_cmm = 1;
			mmap_write_unlock(vcpu->kvm->mm);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		mmap_read_lock(vcpu->kvm->mm);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		mmap_read_unlock(vcpu->kvm->mm);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	mmap_read_lock(gmap->mm);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	mmap_read_unlock(gmap->mm);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

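/*
 * LCTL/LCTLG load a wrapping range of control registers from guest
 * memory; since control register contents can affect address
 * translation, a TLB flush is requested afterwards.
 */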
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	case 0x71:
		return handle_lpswey(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

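/*
 * TPROT condition codes: 0 - fetching and storing permitted, 1 - fetching
 * permitted only, 2 - neither permitted, 3 - translation not available;
 * any other translation exception is injected as a program interrupt.
 */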
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address, operand2;
	unsigned long gpa;
	u8 access_key;
	bool writable;
	int ret, cc;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL);
	access_key = (operand2 & 0xf0) >> 4;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu->kvm);

	ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
					       GACC_STORE, access_key);
	if (ret == 0) {
		gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	} else if (ret == PGM_PROTECTION) {
		writable = false;
		/* Write protected? Try again with read-only... */
		ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
						       GACC_FETCH, access_key);
	}
	if (ret >= 0) {
		cc = -1;

		/* Fetching permitted; storing permitted */
		if (ret == 0 && writable)
			cc = 0;
		/* Fetching permitted; storing not permitted */
		else if (ret == 0 && !writable)
			cc = 1;
		/* Fetching not permitted; storing not permitted */
		else if (ret == PGM_PROTECTION)
			cc = 2;
		/* Translation not available */
		else if (ret != PGM_ADDRESSING && ret != PGM_TRANSLATION_SPEC)
			cc = 3;

		if (cc != -1) {
			kvm_s390_set_psw_cc(vcpu, cc);
			ret = 0;
		} else {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		}
	}

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu->kvm);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}