// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test code for the s390x kvm ucontrol interface
 *
 * Copyright IBM Corp. 2024
 *
 * Authors:
 *  Christoph Schlameuss <schlameuss@linux.ibm.com>
 */
#include "debug_print.h"
#include "kselftest_harness.h"
#include "kvm_util.h"
#include "processor.h"
#include "sie.h"

#include <linux/capability.h>
#include <linux/sizes.h>

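/* program interruption code for a segment-translation exception */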
#define PGM_SEGMENT_TRANSLATION 0x10

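/*
 * Guest layout: VM_MEM_SIZE (4 MiB) of base memory is mapped at guest
 * physical address 0 during setup; up to VM_MEM_EXT_SIZE (2 MiB) more is
 * mapped and unmapped on demand by the tests. The test code itself is
 * copied to the last megabyte of the base area (see code_gpa, 3 MiB).
 */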
#define VM_MEM_SIZE (4 * SZ_1M)
#define VM_MEM_EXT_SIZE (2 * SZ_1M)
#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)

/* declare capget directly to check capabilities without libcap */
int capget(cap_user_header_t header, cap_user_data_t data);

/**
 * In order to create user controlled virtual machines on S390,
 * check KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL
 * as a privileged user (CAP_SYS_ADMIN).
 */
void require_ucontrol_admin(void)
{
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
	};
	int rc;

	rc = capget(&hdr, data);
	TEST_ASSERT_EQ(0, rc);
	TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
}

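/*
 * The test programs below use diag 0x44 (voluntary time slice end) as a
 * checkpoint. The kernel does not handle it for ucontrol VMs, so each
 * diag 0x44 surfaces to userspace as a SIE instruction intercept (see
 * uc_assert_diag44()). r0 counts the checkpoints reached, which lets the
 * tests assert forward progress.
 */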
/* Test program setting some registers and looping */
extern char test_gprs_asm[];
asm("test_gprs_asm:\n"
	"xgr	%r0, %r0\n"
	"lgfi	%r1,1\n"
	"lgfi	%r2,2\n"
	"lgfi	%r3,3\n"
	"lgfi	%r4,4\n"
	"lgfi	%r5,5\n"
	"lgfi	%r6,6\n"
	"lgfi	%r7,7\n"
	"0:\n"
	"	diag	0,0,0x44\n"
	"	ahi	%r0,1\n"
	"	j	0b\n"
);

/* Test program manipulating memory */
extern char test_mem_asm[];
asm("test_mem_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	/* store r1 at the address r5 + r6 (base gpa + offset set by the test) */
	"	st	%r1,0(%r5,%r6)\n"

	/* clear r1, then load the value back to verify the store */
	"	xgr	%r1,%r1\n"
	"	l	%r1,0(%r5,%r6)\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);

/* Test program manipulating storage keys */
extern char test_skey_asm[];
asm("test_skey_asm:\n"
	"xgr	%r0, %r0\n"

	"0:\n"
	"	ahi	%r0,1\n"
	"	st	%r1,0(%r5,%r6)\n"

	/* set the storage key of the page addressed by r6 from r1, read it back */
	"	sske	%r1,%r6\n"
	"	xgr	%r1,%r1\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	/* reset the reference bit and read the key back again */
	"	rrbe	%r1,%r6\n"
	"	iske	%r1,%r6\n"
	"	ahi	%r0,1\n"
	"	diag	0,0,0x44\n"

	"	j	0b\n"
);

FIXTURE(uc_kvm)
{
	struct kvm_s390_sie_block *sie_block;
	struct kvm_run *run;
	uintptr_t base_gpa;
	uintptr_t code_gpa;
	uintptr_t base_hva;
	uintptr_t code_hva;
	int kvm_run_size;
	vm_paddr_t pgd;
	void *vm_mem;
	int vcpu_fd;
	int kvm_fd;
	int vm_fd;
};

/**
 * create VM with single vcpu, map kvm_run and SIE control block for easy access
 */
FIXTURE_SETUP(uc_kvm)
{
	struct kvm_s390_vm_cpu_processor info;
	int rc;

	require_ucontrol_admin();

	self->kvm_fd = open_kvm_dev_path_or_exit();
	self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(self->vm_fd, 0);

	kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
			    KVM_S390_VM_CPU_PROCESSOR, &info);
	TH_LOG("create VM 0x%llx", info.cpuid);

	self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(self->vcpu_fd, 0);

	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
		TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
	self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
					   PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
	ASSERT_NE(self->run, MAP_FAILED);
	/**
	 * For virtual cpus that have been created with S390 user controlled
	 * virtual machines, the resulting vcpu fd can be memory mapped at page
	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
	 * the virtual cpu's hardware control block.
	 */
	self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
							    PROT_READ | PROT_WRITE, MAP_SHARED,
							    self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
	ASSERT_NE(self->sie_block, MAP_FAILED);

	TH_LOG("VM created %p %p", self->run, self->sie_block);

	self->base_gpa = 0;
	self->code_gpa = self->base_gpa + (3 * SZ_1M);

	self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M);
	ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
	self->base_hva = (uintptr_t)self->vm_mem;
	self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
	struct kvm_s390_ucas_mapping map = {
		.user_addr = self->base_hva,
		.vcpu_addr = self->base_gpa,
		.length = VM_MEM_SIZE,
	};
	TH_LOG("ucas map %p %p 0x%llx",
	       (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
	ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
				rc, strerror(errno));

	TH_LOG("page in %p", (void *)self->base_gpa);
	rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
	ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
				(void *)self->base_hva, rc, strerror(errno));

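	/* the vcpu starts in stopped state; clear the flag so KVM_RUN enters SIE */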
	self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
}

FIXTURE_TEARDOWN(uc_kvm)
{
	munmap(self->sie_block, PAGE_SIZE);
	munmap(self->run, self->kvm_run_size);
	close(self->vcpu_fd);
	close(self->vm_fd);
	close(self->kvm_fd);
	free(self->vm_mem);
}

TEST_F(uc_kvm, uc_sie_assertions)
{
	/* assert interception of Code 08 (Program Interruption) is set */
	EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
}

TEST_F(uc_kvm, uc_attr_mem_limit)
{
	u64 limit;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr = (u64)&limit,
	};
	int rc;

	rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
	EXPECT_EQ(0, rc);

	rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
	EXPECT_EQ(0, rc);
	EXPECT_EQ(~0UL, limit);

	/* assert set not supported */
	rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

TEST_F(uc_kvm, uc_no_dirty_log)
{
	struct kvm_dirty_log dlog;
	int rc;

	rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);
}

/**
 * Assert HPAGE CAP cannot be enabled on UCONTROL VM
 */
TEST(uc_cap_hpage)
{
	int rc, kvm_fd, vm_fd, vcpu_fd;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_HPAGE_1M,
	};

	require_ucontrol_admin();

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	ASSERT_GE(vm_fd, 0);

	/* assert hpages are not supported on ucontrol vm */
	rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
	EXPECT_EQ(0, rc);

	/* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EINVAL, errno);

	/* assert HPAGE CAP is rejected after vCPU creation */
	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	ASSERT_GE(vcpu_fd, 0);
	rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	EXPECT_EQ(-1, rc);
	EXPECT_EQ(EBUSY, errno);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

/* calculate host virtual addr from guest physical addr */
static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
{
	return (void *)(self->base_hva - self->base_gpa + gpa);
}

/* map / make additional memory available */
static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas map %p %p 0x%llx",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
}

/* unmap previously mapped memory */
static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (u64)gpa2hva(self, vcpu_addr),
		.vcpu_addr = vcpu_addr,
		.length = length,
	};
	pr_info("ucas unmap %p %p 0x%llx",
		(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
	return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
}

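/*
 * On ucontrol VMs the kernel does not resolve guest translation faults
 * itself. They are forwarded as KVM_EXIT_S390_UCONTROL together with the
 * program interruption code and the translation-exception identification,
 * and userspace is expected to establish the mapping via KVM_S390_UCAS_MAP.
 */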
/* handle ucontrol exit by mapping the accessed segment */
static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;
	u64 seg_addr;
	int rc;

	TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	switch (run->s390_ucontrol.pgm_code) {
	case PGM_SEGMENT_TRANSLATION:
		seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1);
		pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n",
			run->s390_ucontrol.trans_exc_code, seg_addr);
		/* map / make additional memory available */
		rc = uc_map_ext(self, seg_addr, SZ_1M);
		TEST_ASSERT_EQ(0, rc);
		break;
	default:
		TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
	}
}

/*
 * Enable storage keys for the guest by clearing the keyless-storage
 * state and switching off interception of the skey instructions.
 */
static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* disable KSS */
	sie_block->cpuflags &= ~CPUSTAT_KSS;
	/* disable skey inst interception */
	sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

/*
 * Handle an instruction intercept.
 * Returns whether the interception has been handled and execution can continue.
 */
static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	int ilen = insn_length(sie_block->ipa >> 8);
	struct kvm_run *run = self->run;

	switch (run->s390_sieic.ipa) {
	case 0xB229: /* ISKE */
	case 0xB22b: /* SSKE */
	case 0xB22a: /* RRBE */
		uc_skey_enable(self);

		/* rewind the PSW to reexecute the intercepted instruction */
		run->psw_addr = run->psw_addr - ilen;
		pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr);
		return true;
	default:
		return false;
	}
}

/*
 * Handle the SIEIC exit.
 * Fails on intercept codes not expected in the test cases.
 * Returns whether the interception has been handled and execution can continue.
 */
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_run *run = self->run;

	/* check SIE interception code */
	pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
		run->s390_sieic.icptcode,
		run->s390_sieic.ipa,
		run->s390_sieic.ipb);
	switch (run->s390_sieic.icptcode) {
	case ICPT_INST:
		/* end execution in caller on intercepted instruction */
		pr_info("sie instruction interception\n");
		return uc_handle_insn_ic(self);
	case ICPT_KSS:
		uc_skey_enable(self);
		return true;
	case ICPT_OPEREXC:
		/* operation exception */
		TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
	default:
		TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
	}
	return true;
}

/* verify VM state on exit */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_run *run = self->run;

	switch (run->exit_reason) {
	case KVM_EXIT_S390_UCONTROL:
		/*
		 * check program interruption code
		 * handle page fault --> ucas map
		 */
		uc_handle_exit_ucontrol(self);
		break;
	case KVM_EXIT_S390_SIEIC:
		return uc_handle_sieic(self);
	default:
		pr_info("exit_reason %2d not handled\n", run->exit_reason);
	}
	return true;
}

/* run the VM until interrupted */
static int uc_run_once(FIXTURE_DATA(uc_kvm) *self)
{
	int rc;

	rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
	print_run(self->run, self->sie_block);
	print_regs(self->run);
	pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
	return rc;
}

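/*
 * On the diag 0x44 intercept the SIE block records the instruction text:
 * ipa holds the first halfword (the 0x83 diag opcode plus register fields)
 * and ipb carries the displacement halfword (0x0044) in its upper half.
 */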
static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;

	/* assert vm was interrupted by diag 0x0044 */
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_ASSERT_EQ(0x8300, sie_block->ipa);
	TEST_ASSERT_EQ(0x440000, sie_block->ipb);
}

TEST_F(uc_kvm, uc_no_user_region)
{
	struct kvm_userspace_memory_region region = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};
	struct kvm_userspace_memory_region2 region2 = {
		.slot = 1,
		.guest_phys_addr = self->code_gpa,
		.memory_size = VM_MEM_EXT_SIZE,
		.userspace_addr = (uintptr_t)self->code_hva,
	};

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION",
		       strerror(errno), errno);

	ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
	ASSERT_TRUE(errno == EEXIST || errno == EINVAL)
		TH_LOG("errno %s (%i) not expected for ioctl KVM_SET_USER_MEMORY_REGION2",
		       strerror(errno), errno);
}

TEST_F(uc_kvm, uc_map_unmap)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	const u64 disp = 1;
	int rc;

	/* copy test_mem_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* set register content for test_mem_asm to access unmapped memory */
	sync_regs->gprs[1] = 0x55;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = VM_MEM_SIZE + disp;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* run and expect to fail with ucontrol pic segment translation */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);

	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);

	/* fail to map memory with not segment aligned address */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
	ASSERT_GT(0, rc)
		TH_LOG("ucas map for non segment address should fail but didn't; "
		       "result %d not expected, %s", rc, strerror(errno));

	/* map / make additional memory available */
	rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* assert registers and memory are in expected state */
	ASSERT_EQ(2, sync_regs->gprs[0]);
	ASSERT_EQ(0x55, sync_regs->gprs[1]);
	ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));

	/* unmap and run loop again */
	rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
	ASSERT_EQ(0, rc)
		TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
	ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
	/* handle ucontrol exit and remap memory after previous map and unmap */
	ASSERT_EQ(true, uc_handle_exit(self));
}

TEST_F(uc_kvm, uc_gprs)
{
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	struct kvm_run *run = self->run;
	struct kvm_regs regs = {};

	/* Set registers to values that are different from the ones that we expect below */
	for (int i = 0; i < 8; i++)
		sync_regs->gprs[i] = 8;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* copy test_gprs_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	/* run and expect interception of diag 44 */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* Retrieve and check guest register values */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	for (int i = 0; i < 8; i++) {
		ASSERT_EQ(i, regs.gprs[i]);
		ASSERT_EQ(i, sync_regs->gprs[i]);
	}

	/* run and expect interception of diag 44 again */
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	uc_assert_diag44(self);

	/* check continued increment of register 0 value */
	ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
	ASSERT_EQ(1, regs.gprs[0]);
	ASSERT_EQ(1, sync_regs->gprs[0]);
}

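/*
 * Storage keys are enabled lazily: the guest may start with the keyless
 * subset (KSS) active, and the first ISKE/SSKE/RRBE triggers an intercept
 * that uc_skey_enable() uses to switch key handling on (see
 * uc_handle_insn_ic() and the ICPT_KSS case in uc_handle_sieic()).
 */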
TEST_F(uc_kvm, uc_skey)
{
	struct kvm_s390_sie_block *sie_block = self->sie_block;
	struct kvm_sync_regs *sync_regs = &self->run->s.regs;
	u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
	struct kvm_run *run = self->run;
	const u8 skeyvalue = 0x34;

	/* copy test_skey_asm to code_hva / code_gpa */
	TH_LOG("copy code %p to vm mapped memory %p / %p",
	       &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa);
	memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);

	/* set register content for test_skey_asm to access mapped memory */
	sync_regs->gprs[1] = skeyvalue;
	sync_regs->gprs[5] = self->base_gpa;
	sync_regs->gprs[6] = test_vaddr;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;

	/* DAT disabled + 64 bit mode */
	run->psw_mask = 0x0000000180000000ULL;
	run->psw_addr = self->code_gpa;

	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(true, uc_handle_exit(self));
	ASSERT_EQ(1, sync_regs->gprs[0]);

	/* SSKE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));

	/*
	 * Bail out and skip the test if the skey instruction is still
	 * intercepted after uc_skey_enable was executed. The instructions
	 * are not handled by the kernel, so there is nothing to test here.
	 */
	TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
	TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
	TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
	TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
	TEST_REQUIRE(sie_block->ipa != 0xb22b);

	/* SSKE + ISKE contd. */
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(2, sync_regs->gprs[0]);
	ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
	uc_assert_diag44(self);

	/* RRBE + ISKE */
	sync_regs->gprs[1] = skeyvalue;
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
	ASSERT_EQ(0, uc_run_once(self));
	ASSERT_EQ(false, uc_handle_exit(self));
	ASSERT_EQ(3, sync_regs->gprs[0]);
	/* assert R reset but rest of skey unchanged */
	ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
	ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
	uc_assert_diag44(self);
}

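/*
 * Table of FLIC device attributes and the outcomes expected on a ucontrol
 * VM: hasrc is the expected (boolean) result of KVM_HAS_DEVICE_ATTR,
 * geterrno/seterrno the errno expected from KVM_GET_/KVM_SET_DEVICE_ATTR
 * (0 means the call is expected to succeed).
 */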
static char uc_flic_b[PAGE_SIZE];
static struct kvm_s390_io_adapter uc_flic_ioa = { .id = 0 };
static struct kvm_s390_io_adapter_req uc_flic_ioam = { .id = 0 };
static struct kvm_s390_ais_req uc_flic_asim = { .isc = 0 };
static struct kvm_s390_ais_all uc_flic_asima = { .simm = 0 };
static struct uc_flic_attr_test {
	char *name;
	struct kvm_device_attr a;
	int hasrc;
	int geterrno;
	int seterrno;
} uc_flic_attr_tests[] = {
	{
		.name = "KVM_DEV_FLIC_GET_ALL_IRQS",
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
			.addr = (u64)&uc_flic_b,
			.attr = PAGE_SIZE,
		},
	},
	{
		.name = "KVM_DEV_FLIC_ENQUEUE",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_ENQUEUE, },
	},
	{
		.name = "KVM_DEV_FLIC_CLEAR_IRQS",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_CLEAR_IRQS, },
	},
	{
		.name = "KVM_DEV_FLIC_ADAPTER_REGISTER",
		.geterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
			.addr = (u64)&uc_flic_ioa,
		},
	},
	{
		.name = "KVM_DEV_FLIC_ADAPTER_MODIFY",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
			.addr = (u64)&uc_flic_ioam,
			.attr = sizeof(uc_flic_ioam),
		},
	},
	{
		.name = "KVM_DEV_FLIC_CLEAR_IO_IRQ",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = {
			.group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
			.attr = 32,
		},
	},
	{
		.name = "KVM_DEV_FLIC_AISM",
		.geterrno = EINVAL,
		.seterrno = ENOTSUP,
		.a = {
			.group = KVM_DEV_FLIC_AISM,
			.addr = (u64)&uc_flic_asim,
		},
	},
	{
		.name = "KVM_DEV_FLIC_AIRQ_INJECT",
		.geterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_AIRQ_INJECT, },
	},
	{
		.name = "KVM_DEV_FLIC_AISM_ALL",
		.geterrno = ENOTSUP,
		.seterrno = ENOTSUP,
		.a = {
			.group = KVM_DEV_FLIC_AISM_ALL,
			.addr = (u64)&uc_flic_asima,
			.attr = sizeof(uc_flic_asima),
		},
	},
	{
		.name = "KVM_DEV_FLIC_APF_ENABLE",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_APF_ENABLE, },
	},
	{
		.name = "KVM_DEV_FLIC_APF_DISABLE_WAIT",
		.geterrno = EINVAL,
		.seterrno = EINVAL,
		.a = { .group = KVM_DEV_FLIC_APF_DISABLE_WAIT, },
	},
};

TEST_F(uc_kvm, uc_flic_attrs)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
	struct kvm_device_attr attr;
	u64 value;
	int rc, i;

	rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd);
	ASSERT_EQ(0, rc) TH_LOG("create device failed with err %s (%i)",
				strerror(errno), errno);

	for (i = 0; i < ARRAY_SIZE(uc_flic_attr_tests); i++) {
		TH_LOG("test %s", uc_flic_attr_tests[i].name);
		attr = (struct kvm_device_attr) {
			.group = uc_flic_attr_tests[i].a.group,
			.attr = uc_flic_attr_tests[i].a.attr,
			.addr = uc_flic_attr_tests[i].a.addr,
		};
		if (attr.addr == 0)
			attr.addr = (u64)&value;

		rc = ioctl(cd.fd, KVM_HAS_DEVICE_ATTR, &attr);
		EXPECT_EQ(uc_flic_attr_tests[i].hasrc, !!rc)
			TH_LOG("expected dev attr missing %s",
			       uc_flic_attr_tests[i].name);

		rc = ioctl(cd.fd, KVM_GET_DEVICE_ATTR, &attr);
		EXPECT_EQ(!!uc_flic_attr_tests[i].geterrno, !!rc)
			TH_LOG("get dev attr rc not expected on %s %s (%i)",
			       uc_flic_attr_tests[i].name,
			       strerror(errno), errno);
		if (uc_flic_attr_tests[i].geterrno)
			EXPECT_EQ(uc_flic_attr_tests[i].geterrno, errno)
				TH_LOG("get dev attr errno not expected on %s %s (%i)",
				       uc_flic_attr_tests[i].name,
				       strerror(errno), errno);

		rc = ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
		EXPECT_EQ(!!uc_flic_attr_tests[i].seterrno, !!rc)
			TH_LOG("set dev attr rc not expected on %s %s (%i)",
			       uc_flic_attr_tests[i].name,
			       strerror(errno), errno);
		if (uc_flic_attr_tests[i].seterrno)
			EXPECT_EQ(uc_flic_attr_tests[i].seterrno, errno)
				TH_LOG("set dev attr errno not expected on %s %s (%i)",
				       uc_flic_attr_tests[i].name,
				       strerror(errno), errno);
	}

	close(cd.fd);
}

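/* GSI routing is not supported on ucontrol VMs and must fail with EINVAL */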
TEST_F(uc_kvm, uc_set_gsi_routing)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	struct kvm_irq_routing_entry ue = {
		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
		.gsi = 1,
		.u.adapter = (struct kvm_irq_routing_s390_adapter) {
			.ind_addr = 0,
		},
	};
	int rc;

	routing->entries[0] = ue;
	routing->nr = 1;
	rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing);
	ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno);
	ASSERT_EQ(EINVAL, errno) TH_LOG("err %s (%i)", strerror(errno), errno);
}

TEST_HARNESS_MAIN