// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test code for the s390x kvm ucontrol interface
 *
 * Copyright IBM Corp. 2024
 *
 * Authors:
 *  Christoph Schlameuss <schlameuss@linux.ibm.com>
 */
#include "debug_print.h"
#include "kselftest_harness.h"
#include "kvm_util.h"
#include "processor.h"
#include "sie.h"

#include <linux/capability.h>
#include <linux/sizes.h>

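/* s390 program interruption code for a segment-translation exception */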
#define PGM_SEGMENT_TRANSLATION 0x10

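/*
 * Guest memory layout: a 4 MiB base mapping created in the fixture setup
 * plus a 2 MiB extension that the tests map and unmap on demand.
 */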
#define VM_MEM_SIZE (4 * SZ_1M)
#define VM_MEM_EXT_SIZE (2 * SZ_1M)
#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)

/* declare capget() directly so we can check capabilities without libcap */
int capget(cap_user_header_t header, cap_user_data_t data);

/**
 * Creating user controlled virtual machines on s390 requires the
 * KVM_CAP_S390_UCONTROL capability and the KVM_VM_S390_UCONTROL VM type
 * flag, and is restricted to privileged users (CAP_SYS_ADMIN).
 */
void require_ucontrol_admin(void)
{
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
	};
	int rc;

	rc = capget(&hdr, data);
	TEST_ASSERT_EQ(0, rc);
	TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
}

/* Test program setting some registers and looping */
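/*
 * The diag 0x44 (voluntary time slice end) used below is intercepted by
 * SIE, giving the host a well-defined sync point inside the loop.
 */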
extern char test_gprs_asm[];
asm("test_gprs_asm:\n"
	"xgr	%r0, %r0\n"
	"lgfi	%r1,1\n"
	"lgfi	%r2,2\n"
	"lgfi	%r3,3\n"
	"lgfi	%r4,4\n"
	"lgfi	%r5,5\n"
	"lgfi	%r6,6\n"
	"lgfi	%r7,7\n"
	"0:\n"
	"	diag	0,0,0x44\n"
	"	ahi	%r0,1\n"
	"	j	0b\n"
);

/* Test program manipulating memory */
extern char test_mem_asm[];
asm("test_mem_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	"	st	%r1,0(%r5,%r6)\n"

	"	xgr	%r1,%r1\n"
	"	l	%r1,0(%r5,%r6)\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);

/* Test program manipulating storage keys */
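/*
 * ISKE reads a storage key, SSKE sets it and RRBE resets the reference
 * bit; all three are intercepted until uc_skey_enable() is executed.
 */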
extern char test_skey_asm[];
asm("test_skey_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	"	st	%r1,0(%r5,%r6)\n"

	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	sske	%r1,%r6\n"
	"	xgr	%r1,%r1\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	rrbe	%r1,%r6\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);

FIXTURE(uc_kvm)
{
	struct kvm_s390_sie_block *sie_block;
	struct kvm_run *run;
	uintptr_t base_gpa;
	uintptr_t code_gpa;
	uintptr_t base_hva;
	uintptr_t code_hva;
	int kvm_run_size;
	vm_paddr_t pgd;
	void *vm_mem;
	int vcpu_fd;
	int kvm_fd;
	int vm_fd;
};

/**
 * Create a VM with a single vcpu and map the kvm_run struct and the SIE
 * control block for easy access.
 */
FIXTURE_SETUP(uc_kvm)
{
	struct kvm_s390_vm_cpu_processor info;
	int rc;

	require_ucontrol_admin();

	self->kvm_fd = open_kvm_dev_path_or_exit();
	self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(self->vm_fd, 0);

	kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
			    KVM_S390_VM_CPU_PROCESSOR, &info);
	TH_LOG("create VM 0x%llx", info.cpuid);

	self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(self->vcpu_fd, 0);

	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
		  TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
	self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
		    PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
	ASSERT_NE(self->run, MAP_FAILED);
	/**
	 * For virtual cpus that have been created with S390 user controlled
	 * virtual machines, the resulting vcpu fd can be memory mapped at page
	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
	 * the virtual cpu's hardware control block.
	 */
	self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
			  PROT_READ | PROT_WRITE, MAP_SHARED,
			  self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
	ASSERT_NE(self->sie_block, MAP_FAILED);

	TH_LOG("VM created %p %p", self->run, self->sie_block);

	self->base_gpa = 0;
	self->code_gpa = self->base_gpa + (3 * SZ_1M);

	self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M);
	ASSERT_NE(NULL, self->vm_mem) TH_LOG("aligned_alloc failed %d", errno);
	self->base_hva = (uintptr_t)self->vm_mem;
	self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
	struct kvm_s390_ucas_mapping map = {
		.user_addr = self->base_hva,
		.vcpu_addr = self->base_gpa,
		.length = VM_MEM_SIZE,
	};
	TH_LOG("ucas map %p %p 0x%llx",
	       (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
	ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
				rc, strerror(errno));

	TH_LOG("page in %p", (void *)self->base_gpa);
	rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
	ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
				(void *)self->base_hva, rc, strerror(errno));

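	/*
	 * The vcpu is created in the stopped state; clear the flag so that
	 * SIE will run the vcpu.
	 */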
	self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
}

FIXTURE_TEARDOWN(uc_kvm)
{
	munmap(self->sie_block, PAGE_SIZE);
	munmap(self->run, self->kvm_run_size);
	close(self->vcpu_fd);
	close(self->vm_fd);
	close(self->kvm_fd);
	free(self->vm_mem);
}

TEST_F(uc_kvm, uc_sie_assertions)
{
	/*
	 * assert ECB_SPECI is not set, i.e. specification exceptions are
	 * intercepted as code 08 (program interruption)
	 */
	EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
}

TEST_F(uc_kvm, uc_attr_mem_limit)
{
	u64 limit;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr = (unsigned long)&limit,
	};
	int rc;

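	/* ucontrol VM memory is managed by userspace; KVM enforces no limit */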
	rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
	EXPECT_EQ(0, rc);
	EXPECT_EQ(~0UL, limit);

	/* assert that setting a memory limit is not supported */
	rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

TEST_F(uc_kvm, uc_no_dirty_log)
{
	struct kvm_dirty_log dlog;
	int rc;

	rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

/**
 * Assert HPAGE CAP cannot be enabled on UCONTROL VM
 */
TEST(uc_cap_hpage)
{
	int rc, kvm_fd, vm_fd, vcpu_fd;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_HPAGE_1M,
	};

	require_ucontrol_admin();

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(vm_fd, 0);

	/* assert hpages are not supported on ucontrol vm */
	rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
	EXPECT_EQ(0, rc);

	/* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);

	/* assert HPAGE CAP is rejected after vCPU creation */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(vcpu_fd, 0);
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EBUSY, errno);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

/* calculate host virtual addr from guest physical addr */
static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
{
	return (void *)(self->base_hva - self->base_gpa + gpa);
}

/* map / make additional memory available */
static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas map %p %p 0x%llx\n",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
}

/* unmap previously mapped memory */
static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas unmap %p %p 0x%llx\n",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
}

/* handle ucontrol exit by mapping the accessed segment */
static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;
	u64 seg_addr;
	int rc;

	TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	switch (run->s390_ucontrol.pgm_code) {
	case PGM_SEGMENT_TRANSLATION:
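		/* round the faulting address down to the 1 MiB segment boundary */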
		seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1);
		pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%llx\n",
			run->s390_ucontrol.trans_exc_code, seg_addr);
		/* map / make additional memory available */
		rc = uc_map_ext(self, seg_addr, SZ_1M);
		TEST_ASSERT_EQ(0, rc);
		break;
	default:
		TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
	}
}

/*
 * Enable storage key handling by clearing the KSS cpuflag and disabling
 * interception of the storage key instructions ISKE, SSKE and RRBE
 */
static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* disable KSS */
	sie_block->cpuflags &= ~CPUSTAT_KSS;
	/* disable skey instruction interception */
	sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

/*
 * Handle the instruction intercept
 * Returns true if the interception was handled and execution can continue
 */
static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
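	/* the two leftmost bits of the opcode encode the instruction length */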
	int ilen = insn_length(sie_block->ipa >> 8);
	struct kvm_run *run = self->run;

	switch (run->s390_sieic.ipa) {
	case 0xb229: /* ISKE */
	case 0xb22b: /* SSKE */
	case 0xb22a: /* RRBE */
		uc_skey_enable(self);

		/* rewind to reexecute intercepted instruction */
		run->psw_addr = run->psw_addr - ilen;
		pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr);
		return true;
	default:
		return false;
	}
}

/*
 * Handle the SIEIC exit
 * * fail on codes not expected in the test cases
 * Returns true if the interception was handled and execution can continue
 */
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_run *run = self->run;

	/* check SIE interception code */
	pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
		run->s390_sieic.icptcode,
		run->s390_sieic.ipa,
		run->s390_sieic.ipb);
	switch (run->s390_sieic.icptcode) {
	case ICPT_INST:
		/* end execution in caller on unhandled instruction interception */
		pr_info("sie instruction interception\n");
		return uc_handle_insn_ic(self);
	case ICPT_KSS:
		uc_skey_enable(self);
		return true;
	case ICPT_OPEREXC:
		/* operation exception */
		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
	default:
		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
	}
	return true;
}

/* handle the VM exit; returns true when execution can continue */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;

	switch (run->exit_reason) {
	case KVM_EXIT_S390_UCONTROL:
		/*
		 * check the program interruption code and
		 * handle a page fault with a ucas map
		 */
		uc_handle_exit_ucontrol(self);
		break;
	case KVM_EXIT_S390_SIEIC:
		return uc_handle_sieic(self);
	default:
		pr_info("exit_reason %2d not handled\n", run->exit_reason);
	}
	return true;
}

/* run the VM until interrupted */
static int uc_run_once(FIXTURE_DATA(uc_kvm) *self)
{
	int rc;

	rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
	print_run(self->run, self->sie_block);
	print_regs(self->run);
	pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
	return rc;
}

static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* assert vm was interrupted by diag 0x0044 */
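	/*
	 * on instruction interception ipa/ipb hold the instruction text:
	 * 0x8300 / 0x440000 encodes "diag 0,0,0x44"
	 */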
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_ASSERT_EQ(0x8300, sie_block->ipa);
	TEST_ASSERT_EQ(0x440000, sie_block->ipb);
}

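/*
 * Guest memory of ucontrol VMs is managed with KVM_S390_UCAS_MAP, so the
 * regular memory slot ioctls must be rejected.
 */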
TEST_F(uc_kvm, uc_no_user_region)
{
	struct kvm_userspace_memory_region region = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};
	struct kvm_userspace_memory_region2 region2 = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
	ASSERT_EQ(EINVAL, errno);

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
	ASSERT_EQ(EINVAL, errno);
}

TEST_F(uc_kvm, uc_map_unmap)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	const u64 disp = 1;
	int rc;

	/* copy test_mem_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* set register content for test_mem_asm to access unmapped memory */
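	/*
	 * test_mem_asm accesses 0(%r5,%r6) = base_gpa + VM_MEM_SIZE + disp,
	 * which lies just past the 4 MiB base mapping.
	 */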
	sync_regs->gprs[1] = 0x55;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = VM_MEM_SIZE + disp;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* run and expect to fail with ucontrol pic segment translation */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);

	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);

	/* mapping memory at a non-segment-aligned address must fail */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
	ASSERT_GT(0, rc)
		TH_LOG("ucas map for non-segment-aligned address should fail but didn't; "
		       "result %d not expected, %s", rc, strerror(errno));

	/* map / make additional memory available */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* assert registers and memory are in expected state */
	ASSERT_EQ(2, sync_regs->gprs[0]);
	ASSERT_EQ(0x55, sync_regs->gprs[1]);
	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));

	/* unmap and run loop again */
	rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	/* handle ucontrol exit and remap memory after previous map and unmap */
	ASSERT_EQ(true, uc_handle_exit(self));
}


TEST_F(uc_kvm, uc_gprs)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	struct kvm_regs regs = {};

	/* Set registers to values that are different from the ones that we expect below */
	for (int i = 0; i < 8; i++)
		sync_regs->gprs[i] = 8;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* copy test_gprs_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* run and expect interception of diag 44 */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* Retrieve and check guest register values */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	for (int i = 0; i < 8; i++) {
		ASSERT_EQ(i, regs.gprs[i]);
		ASSERT_EQ(i, sync_regs->gprs[i]);
	}

	/* run and expect interception of diag 44 again */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* check continued increment of register 0 value */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	ASSERT_EQ(1, regs.gprs[0]);
	ASSERT_EQ(1, sync_regs->gprs[0]);
}

TEST_F(uc_kvm, uc_skey)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
	struct kvm_run *run = self->run;
	const u8 skeyvalue = 0x34;

	/* copy test_skey_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);

	/* set register content for test_skey_asm to access mapped memory */
	sync_regs->gprs[1] = skeyvalue;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = test_vaddr;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

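	/*
	 * The first ISKE is intercepted (KSS or instruction intercept) and
	 * uc_handle_exit() enables skey handling in the SIE control block.
	 */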
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(true, uc_handle_exit(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);

	/* ISKE */
	ASSERT_EQ(0, uc_run_once(self));

	/*
	 * Bail out and skip the test if ISKE is still being intercepted even
	 * though uc_skey_enable was executed: the skey instructions are not
	 * handled by the kernel, so there is nothing to test for here.
	 */
	TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
	TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_REQUIRE(sie_block->ipa != 0xb229);

	/* ISKE contd. */
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(2, sync_regs->gprs[0]);
	/* assert initial skey (ACC = 0, R & C = 1) */
	ASSERT_EQ(0x06, sync_regs->gprs[1]);
	uc_assert_diag44(self);

	/* SSKE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
	uc_assert_diag44(self);

	/* RRBE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(4, sync_regs->gprs[0]);
	/* assert R reset but rest of skey unchanged */
	ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
	ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
	uc_assert_diag44(self);
}


TEST_HARNESS_MAIN