Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
43 TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0); in require_ucontrol_admin()
122 * create VM with single vcpu, map kvm_run and SIE control block for easy access
131 self->kvm_fd = open_kvm_dev_path_or_exit(); in FIXTURE_SETUP()
132 self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL); in FIXTURE_SETUP()
133 ASSERT_GE(self->vm_fd, 0); in FIXTURE_SETUP()
135 kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL, in FIXTURE_SETUP()
137 TH_LOG("create VM 0x%llx", info.cpuid); in FIXTURE_SETUP()
139 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0); in FIXTURE_SETUP()
140 ASSERT_GE(self->vcpu_fd, 0); in FIXTURE_SETUP()
142 self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); in FIXTURE_SETUP()
143 ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run)) in FIXTURE_SETUP()
144 TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size)); in FIXTURE_SETUP()
145 self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE, in FIXTURE_SETUP()
146 MAP_SHARED, self->vcpu_fd); in FIXTURE_SETUP()
150 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of in FIXTURE_SETUP()
153 self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE, in FIXTURE_SETUP()
154 MAP_SHARED, self->vcpu_fd, in FIXTURE_SETUP()
157 TH_LOG("VM created %p %p", self->run, self->sie_block); in FIXTURE_SETUP()
159 self->base_gpa = 0; in FIXTURE_SETUP()
160 self->code_gpa = self->base_gpa + (3 * SZ_1M); in FIXTURE_SETUP()
162 self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M); in FIXTURE_SETUP()
163 ASSERT_NE(NULL, self->vm_mem) TH_LOG("aligned_alloc failed, errno %d", errno); in FIXTURE_SETUP()
164 self->base_hva = (uintptr_t)self->vm_mem; in FIXTURE_SETUP()
165 self->code_hva = self->base_hva - self->base_gpa + self->code_gpa; in FIXTURE_SETUP()
166 struct kvm_s390_ucas_mapping map = { in FIXTURE_SETUP() local
167 .user_addr = self->base_hva, in FIXTURE_SETUP()
168 .vcpu_addr = self->base_gpa, in FIXTURE_SETUP()
171 TH_LOG("ucas map %p %p 0x%llx", in FIXTURE_SETUP()
172 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in FIXTURE_SETUP()
173 rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map); in FIXTURE_SETUP()
174 ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s", in FIXTURE_SETUP()
177 TH_LOG("page in %p", (void *)self->base_gpa); in FIXTURE_SETUP()
178 rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa); in FIXTURE_SETUP()
180 (void *)self->base_hva, rc, strerror(errno)); in FIXTURE_SETUP()
182 self->sie_block->cpuflags &= ~CPUSTAT_STOPPED; in FIXTURE_SETUP()
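
The setup fragment above reduces to the following call sequence. A minimal sketch,
assuming the usual /dev/kvm device node and a 4 KiB page size (KVM_S390_SIE_PAGE_OFFSET
counts pages, so it is shifted by the page shift for mmap); error handling elided:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    int kvm_fd  = open("/dev/kvm", O_RDWR);
    int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);

    /* the shared kvm_run structure sits at offset 0 of the vcpu fd */
    int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
    struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);

    /* ucontrol only: the hardware SIE control block is mappable one page
     * further up; the test reads it through its own copy of the block layout */
    void *sie_block = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << 12);
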
187 kvm_munmap(self->sie_block, PAGE_SIZE); in FIXTURE_TEARDOWN()
188 kvm_munmap(self->run, self->kvm_run_size); in FIXTURE_TEARDOWN()
189 close(self->vcpu_fd); in FIXTURE_TEARDOWN()
190 close(self->vm_fd); in FIXTURE_TEARDOWN()
191 close(self->kvm_fd); in FIXTURE_TEARDOWN()
192 free(self->vm_mem); in FIXTURE_TEARDOWN()
198 EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI); in TEST_F()
211 rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr); in TEST_F()
214 rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr); in TEST_F()
219 rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr); in TEST_F()
220 EXPECT_EQ(-1, rc); in TEST_F()
229 rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog); in TEST_F()
230 EXPECT_EQ(-1, rc); in TEST_F()
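
Both rejections follow the generic device-attribute and dirty-log shapes. A sketch
of the probes; the group/attr pair (KVM_S390_VM_MEM_CTRL / KVM_S390_VM_MEM_LIMIT_SIZE)
is an assumption based on the surrounding memory-limit checks, not taken from the
matched lines:

    __u64 limit;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,           /* assumed for illustration */
        .attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr  = (__u64)(uintptr_t)&limit,
    };
    ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr);    /* probe for the attribute */
    ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);    /* read the current limit */
    ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);    /* must fail: -1 as asserted above */

    struct kvm_dirty_log dlog = { .slot = 0 };
    ioctl(vm_fd, KVM_GET_DIRTY_LOG, &dlog);      /* must fail: no memslots to log */
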
235 * Assert HPAGE CAP cannot be enabled on UCONTROL VM
250 /* assert hpages are not supported on ucontrol vm */ in TEST()
254 /* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */ in TEST()
256 EXPECT_EQ(-1, rc); in TEST()
263 EXPECT_EQ(-1, rc); in TEST()
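
Capability enablement goes through KVM_ENABLE_CAP on the VM fd; a minimal sketch of
the check this test performs:

    struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_HPAGE_1M };
    int rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    /* expected: rc == -1, since ucontrol VMs provide guest memory per vcpu
     * (UCAS) and cannot switch to 1M host backing pages */
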
274 return (void *)(self->base_hva - self->base_gpa + gpa); in gpa2hva()
277 /* map / make additional memory available */
280 struct kvm_s390_ucas_mapping map = { in uc_map_ext() local
285 pr_info("ucas map %p %p 0x%llx", in uc_map_ext()
286 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in uc_map_ext()
287 return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map); in uc_map_ext()
293 struct kvm_s390_ucas_mapping map = { in uc_unmap_ext() local
299 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in uc_unmap_ext()
300 return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map); in uc_unmap_ext()
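
Both helpers wrap the same per-vcpu ioctl pair. The essential shape, with hva/gpa/len
standing in for the helper arguments (UCAS requires segment, i.e. 1 MiB, aligned
guest addresses, which is what the alignment test further down relies on):

    struct kvm_s390_ucas_mapping map = {
        .user_addr = hva,   /* host virtual address backing the range */
        .vcpu_addr = gpa,   /* guest address, 1 MiB aligned */
        .length    = len,
    };
    /* note: issued on the vcpu fd, not the vm fd */
    ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
    ioctl(vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
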
306 struct kvm_run *run = self->run; in uc_handle_exit_ucontrol()
310 TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in uc_handle_exit_ucontrol()
311 switch (run->s390_ucontrol.pgm_code) { in uc_handle_exit_ucontrol()
313 seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1); in uc_handle_exit_ucontrol()
315 run->s390_ucontrol.trans_exc_code, seg_addr); in uc_handle_exit_ucontrol()
316 /* map / make additional memory available */ in uc_handle_exit_ucontrol()
321 TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code); in uc_handle_exit_ucontrol()
332 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_skey_enable()
335 sie_block->cpuflags &= ~CPUSTAT_KSS; in uc_skey_enable()
337 sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); in uc_skey_enable()
346 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_handle_insn_ic()
347 int ilen = insn_length(sie_block->ipa >> 8); in uc_handle_insn_ic()
348 struct kvm_run *run = self->run; in uc_handle_insn_ic()
350 switch (run->s390_sieic.ipa) { in uc_handle_insn_ic()
357 run->psw_addr = run->psw_addr - ilen; in uc_handle_insn_ic()
358 pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr); in uc_handle_insn_ic()
372 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_handle_sieic()
373 struct kvm_run *run = self->run; in uc_handle_sieic()
377 run->s390_sieic.icptcode, in uc_handle_sieic()
378 run->s390_sieic.ipa, in uc_handle_sieic()
379 run->s390_sieic.ipb); in uc_handle_sieic()
380 switch (run->s390_sieic.icptcode) { in uc_handle_sieic()
390 TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb); in uc_handle_sieic()
392 TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode); in uc_handle_sieic()
397 /* verify VM state on exit */
400 struct kvm_run *run = self->run; in uc_handle_exit()
402 switch (run->exit_reason) { in uc_handle_exit()
405 * handle page fault --> ucas map in uc_handle_exit()
412 pr_info("exit_reason %2d not handled\n", run->exit_reason); in uc_handle_exit()
417 /* run the VM until interrupted */
422 rc = ioctl(self->vcpu_fd, KVM_RUN, NULL); in uc_run_once()
423 print_run(self->run, self->sie_block); in uc_run_once()
424 print_regs(self->run); in uc_run_once()
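
uc_run_once() is one KVM_RUN round trip; driving the guest "until interrupted" is
then a loop over it, e.g.:

    do {
            uc_run_once(self);
    } while (uc_handle_exit(self));   /* resume while exits are resolvable */
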
431 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_assert_diag44()
433 /* assert vm was interrupted by diag 0x0044 */ in uc_assert_diag44()
434 TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason); in uc_assert_diag44()
435 TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); in uc_assert_diag44()
436 TEST_ASSERT_EQ(0x8300, sie_block->ipa); in uc_assert_diag44()
437 TEST_ASSERT_EQ(0x440000, sie_block->ipb); in uc_assert_diag44()
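
The magic numbers decode as follows: ipa 0x8300 is the DIAGNOSE opcode (0x83) in the
high byte, and ipb 0x440000 carries the function code 0x44 shifted into bits 16-31.
On the guest side this corresponds to a single time-slice-yield instruction (a
sketch; the register operands are irrelevant for diag 0x44):

    asm volatile("diag 0,0,0x44");
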
444 .guest_phys_addr = self->code_gpa, in TEST_F()
446 .userspace_addr = (uintptr_t)self->code_hva, in TEST_F()
450 .guest_phys_addr = self->code_gpa, in TEST_F()
452 .userspace_addr = (uintptr_t)self->code_hva, in TEST_F()
455 ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region)); in TEST_F()
460 ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2)); in TEST_F()
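
This is the defining property of ucontrol VMs: both memslot ioctls are rejected, and
guest memory can only be established per vcpu via KVM_S390_UCAS_MAP. The check
pattern, assuming the rejection comes back as EINVAL:

    errno = 0;
    int rc = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    /* expected on a ucontrol VM: rc == -1 && errno == EINVAL */
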
468 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
469 struct kvm_run *run = self->run; in TEST_F()
474 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
475 &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
476 memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE); in TEST_F()
479 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
480 run->psw_addr = self->code_gpa; in TEST_F()
483 sync_regs->gprs[1] = 0x55; in TEST_F()
484 sync_regs->gprs[5] = self->base_gpa; in TEST_F()
485 sync_regs->gprs[6] = VM_MEM_SIZE + disp; in TEST_F()
486 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
490 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
491 ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in TEST_F()
493 ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); in TEST_F()
494 ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code); in TEST_F()
496 /* fail to map memory at a non-segment-aligned address */ in TEST_F()
497 rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE); in TEST_F()
499 TH_LOG("ucas map for non segment address should fail but didn't; " in TEST_F()
502 /* map / make additional memory available */ in TEST_F()
503 rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); in TEST_F()
505 TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno)); in TEST_F()
511 ASSERT_EQ(2, sync_regs->gprs[0]); in TEST_F()
512 ASSERT_EQ(0x55, sync_regs->gprs[1]); in TEST_F()
513 ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp)); in TEST_F()
516 rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); in TEST_F()
520 ASSERT_EQ(3, sync_regs->gprs[0]); in TEST_F()
521 ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in TEST_F()
522 ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); in TEST_F()
523 /* handle ucontrol exit and remap memory after previous map and unmap */ in TEST_F()
529 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
530 struct kvm_run *run = self->run; in TEST_F()
535 sync_regs->gprs[i] = 8; in TEST_F()
536 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
539 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
540 &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
541 memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE); in TEST_F()
544 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
545 run->psw_addr = self->code_gpa; in TEST_F()
553 ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs)); in TEST_F()
556 ASSERT_EQ(i, sync_regs->gprs[i]); in TEST_F()
565 ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs)); in TEST_F()
567 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
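
The register round trip uses the kvm_run sync_regs protocol: stage values in
run->s.regs, mark them dirty so KVM loads them on entry, and read them back after
the exit. In short:

    sync_regs->gprs[2] = 0x1234;              /* stage a guest register */
    run->kvm_dirty_regs |= KVM_SYNC_GPRS;     /* KVM loads gprs on next run */
    ioctl(vcpu_fd, KVM_RUN, NULL);
    /* after the exit run->s.regs holds the guest values again, and
     * KVM_GET_REGS must agree with them, which is what is asserted above */
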
572 struct kvm_s390_sie_block *sie_block = self->sie_block; in TEST_F()
573 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
574 u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); in TEST_F()
575 struct kvm_run *run = self->run; in TEST_F()
579 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
580 &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
581 memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE); in TEST_F()
584 sync_regs->gprs[1] = skeyvalue; in TEST_F()
585 sync_regs->gprs[5] = self->base_gpa; in TEST_F()
586 sync_regs->gprs[6] = test_vaddr; in TEST_F()
587 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
590 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
591 run->psw_addr = self->code_gpa; in TEST_F()
595 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
598 sync_regs->gprs[1] = skeyvalue; in TEST_F()
599 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
607 TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS); in TEST_F()
608 TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)); in TEST_F()
609 TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason); in TEST_F()
610 TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); in TEST_F()
611 TEST_REQUIRE(sie_block->ipa != 0xb22b); in TEST_F()
615 ASSERT_EQ(2, sync_regs->gprs[0]); in TEST_F()
616 ASSERT_EQ(skeyvalue, sync_regs->gprs[1]); in TEST_F()
620 sync_regs->gprs[1] = skeyvalue; in TEST_F()
621 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
624 ASSERT_EQ(3, sync_regs->gprs[0]); in TEST_F()
626 ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]); in TEST_F()
627 ASSERT_EQ(0, sync_regs->gprs[1] & 0x04); in TEST_F()
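
The guest half of this test exercises the three key instructions that
uc_skey_enable() stopped intercepting. A sketch of that sequence, assuming %r1
carries the key and %r5/%r6 the base address and offset staged via sync_regs above
(the actual test_skey_asm may differ in detail):

    agr  %r5,%r6      /* %r5 := guest address of the test page */
    sske %r1,%r5      /* set its storage key from %r1 */
    iske %r1,%r5      /* read the key back into %r1 */
    rrbe %r0,%r5      /* reset the reference bit: hence the & 0xfa check */
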
734 rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd); in TEST_F()
791 routing->entries[0] = ue; in TEST_F()
792 routing->nr = 1; in TEST_F()
793 rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing); in TEST_F()
794 ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno); in TEST_F()
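
kvm_irq_routing is a variable-length structure, so the caller sizes it for the entry
count; the rejected call above then has this shape (ue being a struct
kvm_irq_routing_entry prepared earlier in the test):

    struct kvm_irq_routing *routing =
            calloc(1, sizeof(*routing) + sizeof(struct kvm_irq_routing_entry));
    routing->nr = 1;
    routing->entries[0] = ue;
    /* expected to fail (rc == -1) on a ucontrol VM, as asserted above */
    int rc = ioctl(vm_fd, KVM_SET_GSI_ROUTING, routing);
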