// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test code for the s390x kvm ucontrol interface
 *
 * Copyright IBM Corp. 2024
 *
 * Authors:
 *  Christoph Schlameuss <schlameuss@linux.ibm.com>
 */
#include "debug_print.h"
#include "kselftest_harness.h"
#include "kvm_util.h"
#include "processor.h"
#include "sie.h"

#include <linux/capability.h>
#include <linux/sizes.h>

#define PGM_SEGMENT_TRANSLATION 0x10

#define VM_MEM_SIZE (4 * SZ_1M)
#define VM_MEM_EXT_SIZE (2 * SZ_1M)
#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)

/* declare capget directly to check capabilities without depending on libcap */
int capget(cap_user_header_t header, cap_user_data_t data);

/**
 * To create user controlled virtual machines on s390, check
 * KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as a
 * privileged user (CAP_SYS_ADMIN).
 */
void require_ucontrol_admin(void)
{
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
	};
	int rc;

	rc = capget(&hdr, data);
	TEST_ASSERT_EQ(0, rc);
	TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
}
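
/*
 * For reference, a minimal sketch of the ucontrol creation sequence the
 * fixture below performs (error handling omitted):
 *
 *	kvm_fd = open_kvm_dev_path_or_exit();
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */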

/* Test program setting some registers and looping */
extern char test_gprs_asm[];
asm("test_gprs_asm:\n"
	"xgr	%r0, %r0\n"
	"lgfi	%r1,1\n"
	"lgfi	%r2,2\n"
	"lgfi	%r3,3\n"
	"lgfi	%r4,4\n"
	"lgfi	%r5,5\n"
	"lgfi	%r6,6\n"
	"lgfi	%r7,7\n"
	"0:\n"
	"	diag	0,0,0x44\n"
	"	ahi	%r0,1\n"
	"	j	0b\n"
);
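
/*
 * test_gprs_asm zeroes gpr 0, loads 1..7 into gprs 1..7 and then loops on
 * diag 0x44 (voluntary time slice end), which SIE intercepts on every
 * iteration. Because the increment follows the diag, the n-th interception
 * observes gpr 0 == n - 1, which the uc_gprs test relies on.
 */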

/* Test program manipulating memory */
extern char test_mem_asm[];
asm("test_mem_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	"	st	%r1,0(%r5,%r6)\n"

	"	xgr	%r1,%r1\n"
	"	l	%r1,0(%r5,%r6)\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);
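
/*
 * The store and load above use base + index addressing, so the effective
 * guest address is gpr 5 + gpr 6. The uc_map_unmap test points this sum
 * just past the mapped region, making the first store raise a segment
 * translation exception that surfaces as a KVM_EXIT_S390_UCONTROL exit.
 */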

/* Test program manipulating storage keys */
extern char test_skey_asm[];
asm("test_skey_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	"	st	%r1,0(%r5,%r6)\n"

	"	sske	%r1,%r6\n"
	"	xgr	%r1,%r1\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	rrbe	%r1,%r6\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);
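
/*
 * Storage key instructions used above: SSKE sets the storage key of the
 * page addressed by gpr 6 from gpr 1, ISKE reads the key back into gpr 1
 * and RRBE resets the reference (R) bit, indicating the prior bit state in
 * the condition code. All three are intercepted until the host enables
 * storage key handling, which the uc_skey test exercises via
 * uc_skey_enable().
 */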

FIXTURE(uc_kvm)
{
	struct kvm_s390_sie_block *sie_block;
	struct kvm_run *run;
	uintptr_t base_gpa;
	uintptr_t code_gpa;
	uintptr_t base_hva;
	uintptr_t code_hva;
	int kvm_run_size;
	vm_paddr_t pgd;
	void *vm_mem;
	int vcpu_fd;
	int kvm_fd;
	int vm_fd;
};

/**
 * create VM with single vcpu, map kvm_run and SIE control block for easy access
 */
FIXTURE_SETUP(uc_kvm)
{
	struct kvm_s390_vm_cpu_processor info;
	int rc;

	require_ucontrol_admin();

	self->kvm_fd = open_kvm_dev_path_or_exit();
	self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(self->vm_fd, 0);

	kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
			    KVM_S390_VM_CPU_PROCESSOR, &info);
	TH_LOG("create VM 0x%llx", info.cpuid);

	self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(self->vcpu_fd, 0);

	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
		  TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
	self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE,
			     MAP_SHARED, self->vcpu_fd);
	/**
	 * For virtual cpus that have been created with S390 user controlled
	 * virtual machines, the resulting vcpu fd can be memory mapped at page
	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
	 * the virtual cpu's hardware control block.
	 */
	self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
				     MAP_SHARED, self->vcpu_fd,
				     KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);

	TH_LOG("VM created %p %p", self->run, self->sie_block);

	self->base_gpa = 0;
	self->code_gpa = self->base_gpa + (3 * SZ_1M);

	self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M);
	ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
	self->base_hva = (uintptr_t)self->vm_mem;
	self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
	struct kvm_s390_ucas_mapping map = {
		.user_addr = self->base_hva,
		.vcpu_addr = self->base_gpa,
		.length = VM_MEM_SIZE,
	};
	TH_LOG("ucas map %p %p 0x%llx",
	       (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
	ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
				rc, strerror(errno));

	TH_LOG("page in %p", (void *)self->base_gpa);
	rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
	ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
				(void *)self->base_hva, rc, strerror(errno));

	self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
}
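
/*
 * Resulting guest address layout under the fixture's assumptions
 * (base_gpa == 0, code_gpa == 3 MiB):
 *
 *	0x000000 +---------------------------+
 *	         | main memory (ucas mapped) |
 *	0x300000 | test code page            |
 *	0x400000 +---------------------------+
 *	         | extension memory, mapped  |
 *	         | on demand by the tests    |
 *	0x600000 +---------------------------+
 */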

FIXTURE_TEARDOWN(uc_kvm)
{
	kvm_munmap(self->sie_block, PAGE_SIZE);
	kvm_munmap(self->run, self->kvm_run_size);
	close(self->vcpu_fd);
	close(self->vm_fd);
	close(self->kvm_fd);
	free(self->vm_mem);
}

TEST_F(uc_kvm, uc_sie_assertions)
{
	/*
	 * Assert ECB_SPECI is not set: specification exceptions (program
	 * interruption code 0x08) must be intercepted, not interpreted by SIE.
	 */
	EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
}

TEST_F(uc_kvm, uc_attr_mem_limit)
{
	u64 limit;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr = (u64)&limit,
	};
	int rc;

	rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
	EXPECT_EQ(0, rc);

	/* ucontrol VMs have no KVM-enforced memory limit */
	rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
	EXPECT_EQ(0, rc);
	EXPECT_EQ(~0UL, limit);

	/* assert setting the limit is not supported */
	rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

TEST_F(uc_kvm, uc_no_dirty_log)
{
	struct kvm_dirty_log dlog;
	int rc;

	rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

/**
 * Assert HPAGE CAP cannot be enabled on UCONTROL VM
 */
TEST(uc_cap_hpage)
{
	int rc, kvm_fd, vm_fd, vcpu_fd;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_HPAGE_1M,
	};

	require_ucontrol_admin();

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(vm_fd, 0);

	/* assert hpages are not supported on ucontrol vm */
	rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
	EXPECT_EQ(0, rc);

	/* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);

	/* assert HPAGE CAP is rejected after vCPU creation */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(vcpu_fd, 0);
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EBUSY, errno);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

/* calculate host virtual addr from guest physical addr */
static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
{
	return (void *)(self->base_hva - self->base_gpa + gpa);
}
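
/*
 * This works because the fixture backs the whole guest range with a single
 * contiguous host allocation, so guest physical and host virtual addresses
 * differ only by the constant base_hva - base_gpa offset.
 */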

/* map / make additional memory available */
static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas map %p %p 0x%llx",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
}

/* unmap previously mapped memory */
static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas unmap %p %p 0x%llx",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
}

/* handle ucontrol exit by mapping the accessed segment */
static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;
	u64 seg_addr;
	int rc;

	TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	switch (run->s390_ucontrol.pgm_code) {
	case PGM_SEGMENT_TRANSLATION:
		seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1);
		pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n",
			run->s390_ucontrol.trans_exc_code, seg_addr);
		/* map / make additional memory available */
		rc = uc_map_ext(self, seg_addr, SZ_1M);
		TEST_ASSERT_EQ(0, rc);
		break;
	default:
		TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
	}
}
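
/*
 * Note that KVM does not resolve guest page faults for ucontrol VMs on its
 * own: the translation exception is reported back through exit_reason
 * KVM_EXIT_S390_UCONTROL and userspace, as above, establishes the missing
 * 1 MiB segment with KVM_S390_UCAS_MAP before re-entering the guest.
 */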

/*
 * Enable storage key handling for the guest:
 * clear the keyless subset (KSS) cpuflag and stop intercepting the
 * storage key instructions ISKE, SSKE and RRBE.
 */
static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* disable KSS */
	sie_block->cpuflags &= ~CPUSTAT_KSS;
	/* disable skey inst interception */
	sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

/*
 * Handle an instruction interception.
 * Returns true if the interception has been handled and execution can
 * continue.
 */
static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	int ilen = insn_length(sie_block->ipa >> 8);
	struct kvm_run *run = self->run;

	switch (run->s390_sieic.ipa) {
	case 0xB229: /* ISKE */
	case 0xB22B: /* SSKE */
	case 0xB22A: /* RRBE */
		uc_skey_enable(self);

		/* rewind the PSW to re-execute the intercepted instruction */
		run->psw_addr = run->psw_addr - ilen;
		pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr);
		return true;
	default:
		return false;
	}
}

/*
 * Handle the SIEIC exit.
 * Fails on interception codes not expected in the test cases.
 * Returns true if the interception has been handled and execution can
 * continue.
 */
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_run *run = self->run;

	/* check SIE interception code */
	pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
		run->s390_sieic.icptcode,
		run->s390_sieic.ipa,
		run->s390_sieic.ipb);
	switch (run->s390_sieic.icptcode) {
	case ICPT_INST:
		/* end execution in caller on intercepted instruction */
		pr_info("sie instruction interception\n");
		return uc_handle_insn_ic(self);
	case ICPT_KSS:
		uc_skey_enable(self);
		return true;
	case ICPT_OPEREXC:
		/* operation exception */
		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
	default:
		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
	}
	return true;
}

/* verify VM state on exit */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;

	switch (run->exit_reason) {
	case KVM_EXIT_S390_UCONTROL:
		/*
		 * Check the program interruption code and handle the page
		 * fault by establishing a ucas mapping.
		 */
		uc_handle_exit_ucontrol(self);
		break;
	case KVM_EXIT_S390_SIEIC:
		return uc_handle_sieic(self);
	default:
		pr_info("exit_reason %2d not handled\n", run->exit_reason);
	}
	return true;
}

/* run the VM until interrupted */
static int uc_run_once(FIXTURE_DATA(uc_kvm) *self)
{
	int rc;

	rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
	print_run(self->run, self->sie_block);
	print_regs(self->run);
	pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
	return rc;
}

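/*
 * "diag 0,0,0x44" assembles to 0x83000044. On an instruction interception
 * SIE stores the first halfword of the instruction in ipa and the remaining
 * instruction bytes left-aligned in ipb, yielding the 0x8300 / 0x440000
 * values asserted below.
 */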
static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* assert vm was interrupted by diag 0x0044 */
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_ASSERT_EQ(0x8300, sie_block->ipa);
	TEST_ASSERT_EQ(0x440000, sie_block->ipb);
}

TEST_F(uc_kvm, uc_no_user_region)
{
	struct kvm_userspace_memory_region region = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};
	struct kvm_userspace_memory_region2 region2 = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION",
		       strerror(errno), errno);

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION2",
		       strerror(errno), errno);
}

TEST_F(uc_kvm, uc_map_unmap)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	const u64 disp = 1;
	int rc;

	/* copy test_mem_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* set register content for test_mem_asm to access unmapped memory */
	sync_regs->gprs[1] = 0x55;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = VM_MEM_SIZE + disp;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* run and expect to fail with ucontrol pic segment translation */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);

	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);

	/* fail to map memory with a non-segment-aligned address */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
	ASSERT_GT(0, rc)
		TH_LOG("ucas map for non-segment address should fail but didn't; "
		       "result %d not expected, %s", rc, strerror(errno));

	/* map / make additional memory available */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* assert registers and memory are in expected state */
	ASSERT_EQ(2, sync_regs->gprs[0]);
	ASSERT_EQ(0x55, sync_regs->gprs[1]);
	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));

	/* unmap and run loop again */
	rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	/* handle ucontrol exit and remap memory after previous map and unmap */
	ASSERT_EQ(true, uc_handle_exit(self));
}

TEST_F(uc_kvm, uc_gprs)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	struct kvm_regs regs = {};

	/* Set registers to values that are different from the ones that we expect below */
	for (int i = 0; i < 8; i++)
		sync_regs->gprs[i] = 8;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* copy test_gprs_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* run and expect interception of diag 44 */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* Retrieve and check guest register values */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	for (int i = 0; i < 8; i++) {
		ASSERT_EQ(i, regs.gprs[i]);
		ASSERT_EQ(i, sync_regs->gprs[i]);
	}

	/* run and expect interception of diag 44 again */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* check continued increment of register 0 value */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	ASSERT_EQ(1, regs.gprs[0]);
	ASSERT_EQ(1, sync_regs->gprs[0]);
}

TEST_F(uc_kvm, uc_skey)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
	struct kvm_run *run = self->run;
	const u8 skeyvalue = 0x34;

	/* copy test_skey_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);

	/*
	 * set register content for test_skey_asm to manipulate the storage
	 * key of a mapped test page
	 */
	sync_regs->gprs[1] = skeyvalue;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = test_vaddr;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(true, uc_handle_exit(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);

	/* SSKE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));

	/*
	 * Bail out and skip the test if iske is still being intercepted even
	 * though uc_skey_enable was executed. The skey instructions are not
	 * handled by the kernel in that case, so there is nothing to test
	 * here.
	 */
	TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
	TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_REQUIRE(sie_block->ipa != 0xB22B);

	/* SSKE + ISKE contd. */
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(2, sync_regs->gprs[0]);
	ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
	uc_assert_diag44(self);

	/* RRBE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	/* assert the reference (R) bit was reset while the rest of the key is unchanged */
	ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
	ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
	uc_assert_diag44(self);
}

static char uc_flic_b[PAGE_SIZE];
static struct kvm_s390_io_adapter uc_flic_ioa = { .id = 0 };
static struct kvm_s390_io_adapter_req uc_flic_ioam = { .id = 0 };
static struct kvm_s390_ais_req uc_flic_asim = { .isc = 0 };
static struct kvm_s390_ais_all uc_flic_asima = { .simm = 0 };
static struct uc_flic_attr_test {
	char *name;
	struct kvm_device_attr a;
	int hasrc;
	int geterrno;
	int seterrno;
} uc_flic_attr_tests[] = {
	{
		.name = "KVM_DEV_FLIC_GET_ALL_IRQS",
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
			.addr = (u64)&uc_flic_b,
			.attr = PAGE_SIZE,
		},
	},
	{
		.name = "KVM_DEV_FLIC_ENQUEUE",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_ENQUEUE, },
	},
	{
		.name = "KVM_DEV_FLIC_CLEAR_IRQS",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_CLEAR_IRQS, },
	},
	{
		.name = "KVM_DEV_FLIC_ADAPTER_REGISTER",
		.geterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
			.addr = (u64)&uc_flic_ioa,
		},
	},
	{
		.name = "KVM_DEV_FLIC_ADAPTER_MODIFY",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
			.addr = (u64)&uc_flic_ioam,
			.attr = sizeof(uc_flic_ioam),
		},
	},
	{
		.name = "KVM_DEV_FLIC_CLEAR_IO_IRQ",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
			.attr = 32,
		},
	},
	{
		.name = "KVM_DEV_FLIC_AISM",
		.geterrno = EINVAL,
		.seterrno = ENOTSUP,
		.a = {
			.group = KVM_DEV_FLIC_AISM,
			.addr = (u64)&uc_flic_asim,
		},
	},
	{
		.name = "KVM_DEV_FLIC_AIRQ_INJECT",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_AIRQ_INJECT, },
	},
	{
		.name = "KVM_DEV_FLIC_AISM_ALL",
		.geterrno = ENOTSUP,
		.seterrno = ENOTSUP,
		.a = {
			.group = KVM_DEV_FLIC_AISM_ALL,
			.addr = (u64)&uc_flic_asima,
			.attr = sizeof(uc_flic_asima),
		},
	},
	{
		.name = "KVM_DEV_FLIC_APF_ENABLE",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_APF_ENABLE, },
	},
	{
		.name = "KVM_DEV_FLIC_APF_DISABLE_WAIT",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_APF_DISABLE_WAIT, },
	},
};
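
/*
 * For each table entry the test below issues, in order, KVM_HAS_DEVICE_ATTR
 * (hasrc is the expected truth value of its return code), then
 * KVM_GET_DEVICE_ATTR and KVM_SET_DEVICE_ATTR (geterrno / seterrno give the
 * expected errno, or 0 if the ioctl is expected to succeed).
 */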

TEST_F(uc_kvm, uc_flic_attrs)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
	struct kvm_device_attr attr;
	u64 value;
	int rc, i;

	rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd);
	ASSERT_EQ(0, rc) TH_LOG("create device failed with err %s (%i)",
				strerror(errno), errno);

	for (i = 0; i < ARRAY_SIZE(uc_flic_attr_tests); i++) {
		TH_LOG("test %s", uc_flic_attr_tests[i].name);
		attr = (struct kvm_device_attr) {
			.group = uc_flic_attr_tests[i].a.group,
			.attr = uc_flic_attr_tests[i].a.attr,
			.addr = uc_flic_attr_tests[i].a.addr,
		};
		if (attr.addr == 0)
			attr.addr = (u64)&value;

		rc = ioctl(cd.fd, KVM_HAS_DEVICE_ATTR, &attr);
		EXPECT_EQ(uc_flic_attr_tests[i].hasrc, !!rc)
			TH_LOG("expected dev attr missing %s",
			       uc_flic_attr_tests[i].name);

		rc = ioctl(cd.fd, KVM_GET_DEVICE_ATTR, &attr);
		EXPECT_EQ(!!uc_flic_attr_tests[i].geterrno, !!rc)
			TH_LOG("get dev attr rc not expected on %s %s (%i)",
			       uc_flic_attr_tests[i].name,
			       strerror(errno), errno);
		if (uc_flic_attr_tests[i].geterrno)
			EXPECT_EQ(uc_flic_attr_tests[i].geterrno, errno)
				TH_LOG("get dev attr errno not expected on %s %s (%i)",
				       uc_flic_attr_tests[i].name,
				       strerror(errno), errno);

		rc = ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
		EXPECT_EQ(!!uc_flic_attr_tests[i].seterrno, !!rc)
			TH_LOG("set dev attr rc not expected on %s %s (%i)",
			       uc_flic_attr_tests[i].name,
			       strerror(errno), errno);
		if (uc_flic_attr_tests[i].seterrno)
			EXPECT_EQ(uc_flic_attr_tests[i].seterrno, errno)
				TH_LOG("set dev attr errno not expected on %s %s (%i)",
				       uc_flic_attr_tests[i].name,
				       strerror(errno), errno);
	}

	close(cd.fd);
}

TEST_F(uc_kvm, uc_set_gsi_routing)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	struct kvm_irq_routing_entry ue = {
		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
		.gsi = 1,
		.u.adapter = (struct kvm_irq_routing_s390_adapter) {
			.ind_addr = 0,
		},
	};
	int rc;

	routing->entries[0] = ue;
	routing->nr = 1;
	rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing);
	ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno);
	ASSERT_EQ(EINVAL, errno) TH_LOG("err %s (%i)", strerror(errno), errno);
}

TEST_HARNESS_MAIN