Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
43 TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0); in require_ucontrol_admin()
122 * create a VM with a single vcpu; map kvm_run and the SIE control block for easy access
131 self->kvm_fd = open_kvm_dev_path_or_exit(); in FIXTURE_SETUP()
132 self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL); in FIXTURE_SETUP()
133 ASSERT_GE(self->vm_fd, 0); in FIXTURE_SETUP()
135 kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL, in FIXTURE_SETUP()
137 TH_LOG("create VM 0x%llx", info.cpuid); in FIXTURE_SETUP()
139 self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0); in FIXTURE_SETUP()
140 ASSERT_GE(self->vcpu_fd, 0); in FIXTURE_SETUP()
142 self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); in FIXTURE_SETUP()
143 ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run)) in FIXTURE_SETUP()
144 TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size)); in FIXTURE_SETUP()
145 self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size, in FIXTURE_SETUP()
146 PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0); in FIXTURE_SETUP()
147 ASSERT_NE(self->run, MAP_FAILED); in FIXTURE_SETUP()
151 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of in FIXTURE_SETUP()
154 self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE, in FIXTURE_SETUP()
156 self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT); in FIXTURE_SETUP()
157 ASSERT_NE(self->sie_block, MAP_FAILED); in FIXTURE_SETUP()
159 TH_LOG("VM created %p %p", self->run, self->sie_block); in FIXTURE_SETUP()
161 self->base_gpa = 0; in FIXTURE_SETUP()
162 self->code_gpa = self->base_gpa + (3 * SZ_1M); in FIXTURE_SETUP()
164 self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M); in FIXTURE_SETUP()
165 ASSERT_NE(NULL, self->vm_mem) TH_LOG("aligned_alloc failed %d", errno); in FIXTURE_SETUP()
166 self->base_hva = (uintptr_t)self->vm_mem; in FIXTURE_SETUP()
167 self->code_hva = self->base_hva - self->base_gpa + self->code_gpa; in FIXTURE_SETUP()
168 struct kvm_s390_ucas_mapping map = { in FIXTURE_SETUP() local
169 .user_addr = self->base_hva, in FIXTURE_SETUP()
170 .vcpu_addr = self->base_gpa, in FIXTURE_SETUP()
173 TH_LOG("ucas map %p %p 0x%llx", in FIXTURE_SETUP()
174 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in FIXTURE_SETUP()
175 rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map); in FIXTURE_SETUP()
176 ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s", in FIXTURE_SETUP()
179 TH_LOG("page in %p", (void *)self->base_gpa); in FIXTURE_SETUP()
180 rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa); in FIXTURE_SETUP()
182 (void *)self->base_hva, rc, strerror(errno)); in FIXTURE_SETUP()
184 self->sie_block->cpuflags &= ~CPUSTAT_STOPPED; in FIXTURE_SETUP()
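Taken together, the FIXTURE_SETUP() hits above trace the whole ucontrol bring-up: open /dev/kvm, create a KVM_VM_S390_UCONTROL VM plus one vcpu, mmap the kvm_run area and the SIE control block, back the guest with 1M-aligned host memory, install the initial ucas mapping, and pre-fault the first page. A condensed sketch of that flow under those assumptions (plain ioctls, error checks omitted; this is not the selftest's code verbatim):

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

#define SEG_1M (1UL << 20)
#define VM_MEM (2 * SEG_1M)	/* backing size, illustrative */

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);

	/* kvm_run lives at offset 0 of the vcpu fd */
	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	/* the SIE control block is exposed one page further up (PAGE_SHIFT is 12 on s390) */
	void *sie_block = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			       vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << 12);

	/* 1M-aligned host memory backs the guest address space */
	void *mem = aligned_alloc(SEG_1M, VM_MEM);
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (uint64_t)mem,
		.vcpu_addr = 0,
		.length = VM_MEM,
	};
	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);

	/* pre-fault guest address 0, as the VCPU_FAULT hit does */
	ioctl(vcpu_fd, KVM_S390_VCPU_FAULT, 0UL);

	(void)run; (void)sie_block;
	return 0;
}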
189 munmap(self->sie_block, PAGE_SIZE); in FIXTURE_TEARDOWN()
190 munmap(self->run, self->kvm_run_size); in FIXTURE_TEARDOWN()
191 close(self->vcpu_fd); in FIXTURE_TEARDOWN()
192 close(self->vm_fd); in FIXTURE_TEARDOWN()
193 close(self->kvm_fd); in FIXTURE_TEARDOWN()
194 free(self->vm_mem); in FIXTURE_TEARDOWN()
200 EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI); in TEST_F()
213 rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr); in TEST_F()
216 rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr); in TEST_F()
221 rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr); in TEST_F()
222 EXPECT_EQ(-1, rc); in TEST_F()
231 rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog); in TEST_F()
232 EXPECT_EQ(-1, rc); in TEST_F()
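These TEST_F() hits follow the stock KVM attribute pattern: probe with KVM_HAS_DEVICE_ATTR before issuing KVM_GET/SET_DEVICE_ATTR, and expect memslot-based ioctls such as KVM_GET_DIRTY_LOG to fail on a ucontrol VM, which has no memslots. A minimal probe sketch; the group/attr pair in the usage comment is an assumption mirroring the CPU-model query in the fixture:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* returns 0 if the attribute exists, -errno (typically -ENXIO) if not */
static int probe_attr(int vm_fd, __u32 group, __u64 attr_id)
{
	struct kvm_device_attr attr = {
		.group = group,
		.attr = attr_id,
	};

	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) ? -errno : 0;
}

/* e.g. probe_attr(vm_fd, KVM_S390_VM_CPU_MODEL, KVM_S390_VM_CPU_PROCESSOR) */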
237 * Assert that the HPAGE capability cannot be enabled on a UCONTROL VM
252 /* assert hpages are not supported on ucontrol vm */ in TEST()
256 /* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */ in TEST()
258 EXPECT_EQ(-1, rc); in TEST()
265 EXPECT_EQ(-1, rc); in TEST()
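The hpage test drives KVM_ENABLE_CAP and expects -1/EINVAL, since 1M hugepage backing is incompatible with the ucontrol memory model. A sketch of the enable attempt, assuming only the documented kvm_enable_cap layout:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* returns 0 if enabled; on a ucontrol VM this should yield -EINVAL */
static int try_enable_hpage(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_HPAGE_1M };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap) ? -errno : 0;
}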
276 return (void *)(self->base_hva - self->base_gpa + gpa); in gpa2hva()
279 /* map / make additional memory available */
282 struct kvm_s390_ucas_mapping map = { in uc_map_ext() local
287 pr_info("ucas map %p %p 0x%llx", in uc_map_ext()
288 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in uc_map_ext()
289 return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map); in uc_map_ext()
295 struct kvm_s390_ucas_mapping map = { in uc_unmap_ext() local
301 (void *)map.user_addr, (void *)map.vcpu_addr, map.length); in uc_unmap_ext()
302 return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map); in uc_unmap_ext()
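uc_map_ext() and uc_unmap_ext() fill the same kvm_s390_ucas_mapping; only the ioctl request differs. A combined helper sketch (hypothetical name, assuming the hva/gpa offset convention of gpa2hva() above and segment-aligned addresses):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int ucas_do(int vcpu_fd, unsigned long req,
		   uint64_t hva, uint64_t gpa, uint64_t len)
{
	struct kvm_s390_ucas_mapping map = {
		.user_addr = hva,
		.vcpu_addr = gpa,
		.length = len,
	};

	return ioctl(vcpu_fd, req, &map);
}

/* map:   ucas_do(vcpu_fd, KVM_S390_UCAS_MAP,   hva, gpa, len) */
/* unmap: ucas_do(vcpu_fd, KVM_S390_UCAS_UNMAP, hva, gpa, len) */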
308 struct kvm_run *run = self->run; in uc_handle_exit_ucontrol()
312 TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in uc_handle_exit_ucontrol()
313 switch (run->s390_ucontrol.pgm_code) { in uc_handle_exit_ucontrol()
315 seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1); in uc_handle_exit_ucontrol()
317 run->s390_ucontrol.trans_exc_code, seg_addr); in uc_handle_exit_ucontrol()
318 /* map / make additional memory available */ in uc_handle_exit_ucontrol()
323 TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code); in uc_handle_exit_ucontrol()
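On a ucontrol VM an unmapped guest segment surfaces as KVM_EXIT_S390_UCONTROL with the faulting address in trans_exc_code; masking off the low 20 bits yields the 1M segment to hand back to KVM_S390_UCAS_MAP, as the hits above do. A hedged sketch of that handler (PGM_SEGMENT_TRANSLATION is the s390 program interruption code 0x10; base_hva/base_gpa describe the initial mapping):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define SEG_1M			(1UL << 20)
#define PGM_SEGMENT_TRANSLATION	0x10

static int handle_ucontrol_exit(int vcpu_fd, struct kvm_run *run,
				uint64_t base_hva, uint64_t base_gpa)
{
	struct kvm_s390_ucas_mapping map;
	uint64_t seg;

	if (run->exit_reason != KVM_EXIT_S390_UCONTROL ||
	    run->s390_ucontrol.pgm_code != PGM_SEGMENT_TRANSLATION)
		return -1;

	/* map the whole 1M segment containing the faulting address */
	seg = run->s390_ucontrol.trans_exc_code & ~(SEG_1M - 1);
	map.user_addr = base_hva - base_gpa + seg;
	map.vcpu_addr = seg;
	map.length = SEG_1M;

	return ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
}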
334 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_skey_enable()
337 sie_block->cpuflags &= ~CPUSTAT_KSS; in uc_skey_enable()
339 sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); in uc_skey_enable()
348 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_handle_insn_ic()
349 int ilen = insn_length(sie_block->ipa >> 8); in uc_handle_insn_ic()
350 struct kvm_run *run = self->run; in uc_handle_insn_ic()
352 switch (run->s390_sieic.ipa) { in uc_handle_insn_ic()
359 run->psw_addr = run->psw_addr - ilen; in uc_handle_insn_ic()
360 pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr); in uc_handle_insn_ic()
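insn_length() decodes the s390 instruction length from the two top bits of the opcode byte (00 → 2 bytes, 01/10 → 4, 11 → 6), which is what lets the handler step psw_addr back over the intercepted instruction. An equivalent sketch of that decode:

/* s390: bits 0-1 of the first opcode byte encode the length */
static int insn_len(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;	/* 00 */
	case 3:
		return 6;	/* 11 */
	default:
		return 4;	/* 01 or 10 */
	}
}

/* rewind, as above: run->psw_addr -= insn_len(sie_block->ipa >> 8); */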
374 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_handle_sieic()
375 struct kvm_run *run = self->run; in uc_handle_sieic()
379 run->s390_sieic.icptcode, in uc_handle_sieic()
380 run->s390_sieic.ipa, in uc_handle_sieic()
381 run->s390_sieic.ipb); in uc_handle_sieic()
382 switch (run->s390_sieic.icptcode) { in uc_handle_sieic()
392 TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb); in uc_handle_sieic()
394 TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode); in uc_handle_sieic()
399 /* verify VM state on exit */
402 struct kvm_run *run = self->run; in uc_handle_exit()
404 switch (run->exit_reason) { in uc_handle_exit()
407 * handle page fault --> ucas map in uc_handle_exit()
414 pr_info("exit_reason %2d not handled\n", run->exit_reason); in uc_handle_exit()
419 /* run the VM until interrupted */
424 rc = ioctl(self->vcpu_fd, KVM_RUN, NULL); in uc_run_once()
425 print_run(self->run, self->sie_block); in uc_run_once()
426 print_regs(self->run); in uc_run_once()
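uc_run_once() is a thin KVM_RUN wrapper; the tests call it in a loop and dispatch on the resulting exit. A sketch of that loop, reusing the hypothetical handle_ucontrol_exit() from above:

/* run until the guest exits with something other than a mappable fault */
static int run_vcpu(int vcpu_fd, struct kvm_run *run,
		    uint64_t base_hva, uint64_t base_gpa)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0)
			return -1;
		if (run->exit_reason != KVM_EXIT_S390_UCONTROL)
			return run->exit_reason;
		/* fault-in path: map the missing segment and re-enter */
		if (handle_ucontrol_exit(vcpu_fd, run, base_hva, base_gpa))
			return -1;
	}
}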
433 struct kvm_s390_sie_block *sie_block = self->sie_block; in uc_assert_diag44()
435 /* assert the VM was interrupted by diag 0x0044 */ in uc_assert_diag44()
436 TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason); in uc_assert_diag44()
437 TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); in uc_assert_diag44()
438 TEST_ASSERT_EQ(0x8300, sie_block->ipa); in uc_assert_diag44()
439 TEST_ASSERT_EQ(0x440000, sie_block->ipb); in uc_assert_diag44()
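The expected values encode the intercepted `diag %r0,%r0,0x44`: opcode 0x83 in the high byte of ipa, and the 0x44 displacement in the high halfword of ipb, hence 0x8300 and 0x440000. As a worked check under that encoding assumption:

#include <stdint.h>

/* intercepted "diag 0,0,0x44": 0x83 opcode, base 0, displacement 0x44 */
static int is_diag44(uint16_t ipa, uint32_t ipb)
{
	return ipa == 0x8300 && (ipb >> 16) == 0x0044;
}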
446 .guest_phys_addr = self->code_gpa, in TEST_F()
448 .userspace_addr = (uintptr_t)self->code_hva, in TEST_F()
452 .guest_phys_addr = self->code_gpa, in TEST_F()
454 .userspace_addr = (uintptr_t)self->code_hva, in TEST_F()
457 ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region)); in TEST_F()
462 ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2)); in TEST_F()
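Both memslot variants must be rejected, since ucontrol VMs manage guest memory exclusively through ucas mappings. A sketch of the legacy-variant attempt (slot number illustrative; expected to fail with EINVAL):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int try_memslot(int vm_fd, uint64_t gpa, uint64_t hva, uint64_t size)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) ? -errno : 0;
}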
470 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
471 struct kvm_run *run = self->run; in TEST_F()
476 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
477 &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
478 memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE); in TEST_F()
481 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
482 run->psw_addr = self->code_gpa; in TEST_F()
485 sync_regs->gprs[1] = 0x55; in TEST_F()
486 sync_regs->gprs[5] = self->base_gpa; in TEST_F()
487 sync_regs->gprs[6] = VM_MEM_SIZE + disp; in TEST_F()
488 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
492 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
493 ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in TEST_F()
495 ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); in TEST_F()
496 ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code); in TEST_F()
498 /* fail to map memory with a non-segment-aligned address */ in TEST_F()
499 rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE); in TEST_F()
501 TH_LOG("ucas map for a non-segment-aligned address should fail but didn't; " in TEST_F()
504 /* map / make additional memory available */ in TEST_F()
505 rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); in TEST_F()
507 TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno)); in TEST_F()
513 ASSERT_EQ(2, sync_regs->gprs[0]); in TEST_F()
514 ASSERT_EQ(0x55, sync_regs->gprs[1]); in TEST_F()
515 ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp)); in TEST_F()
518 rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); in TEST_F()
522 ASSERT_EQ(3, sync_regs->gprs[0]); in TEST_F()
523 ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); in TEST_F()
524 ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); in TEST_F()
525 /* handle ucontrol exit and remap memory after previous map and unmap */ in TEST_F()
531 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
532 struct kvm_run *run = self->run; in TEST_F()
537 sync_regs->gprs[i] = 8; in TEST_F()
538 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
541 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
542 &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
543 memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE); in TEST_F()
546 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
547 run->psw_addr = self->code_gpa; in TEST_F()
555 ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs)); in TEST_F()
558 ASSERT_EQ(i, sync_regs->gprs[i]); in TEST_F()
567 ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs)); in TEST_F()
569 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
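The GPR test leans on the sync-regs protocol: userspace writes run->s.regs, flags the GPRs dirty before KVM_RUN, and reads the synced values back on exit; KVM_GET_REGS only cross-checks them. A sketch of one round trip, assuming KVM_CAP_SYNC_REGS semantics on s390:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* seed gpr1, run once, and return the synced-back gpr0 */
static __u64 sync_gprs_roundtrip(int vcpu_fd, struct kvm_run *run)
{
	run->s.regs.gprs[1] = 0x55;		/* input for the guest */
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;	/* tell KVM to load it */

	ioctl(vcpu_fd, KVM_RUN, NULL);

	/* KVM syncs guest registers back into run->s.regs on exit */
	return run->s.regs.gprs[0];
}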
574 struct kvm_s390_sie_block *sie_block = self->sie_block; in TEST_F()
575 struct kvm_sync_regs *sync_regs = &self->run->s.regs; in TEST_F()
576 u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); in TEST_F()
577 struct kvm_run *run = self->run; in TEST_F()
581 TH_LOG("copy code %p to vm mapped memory %p / %p", in TEST_F()
582 &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa); in TEST_F()
583 memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE); in TEST_F()
586 sync_regs->gprs[1] = skeyvalue; in TEST_F()
587 sync_regs->gprs[5] = self->base_gpa; in TEST_F()
588 sync_regs->gprs[6] = test_vaddr; in TEST_F()
589 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
592 run->psw_mask = 0x0000000180000000ULL; in TEST_F()
593 run->psw_addr = self->code_gpa; in TEST_F()
597 ASSERT_EQ(1, sync_regs->gprs[0]); in TEST_F()
600 sync_regs->gprs[1] = skeyvalue; in TEST_F()
601 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
609 TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS); in TEST_F()
610 TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)); in TEST_F()
611 TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason); in TEST_F()
612 TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); in TEST_F()
613 TEST_REQUIRE(sie_block->ipa != 0xb22b); in TEST_F()
617 ASSERT_EQ(2, sync_regs->gprs[0]); in TEST_F()
618 ASSERT_EQ(skeyvalue, sync_regs->gprs[1]); in TEST_F()
622 sync_regs->gprs[1] = skeyvalue; in TEST_F()
623 run->kvm_dirty_regs |= KVM_SYNC_GPRS; in TEST_F()
626 ASSERT_EQ(3, sync_regs->gprs[0]); in TEST_F()
628 ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]); in TEST_F()
629 ASSERT_EQ(0, sync_regs->gprs[1] & 0x04); in TEST_F()
736 rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd); in TEST_F()
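The KVM_CREATE_DEVICE hit follows the two-step device pattern: fill in a device type and let KVM return the new device fd in the same struct. A sketch; KVM_DEV_TYPE_FLIC (the s390 floating interrupt controller) is an assumed, plausible type here:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* returns the new device fd, or -1 on failure */
static int create_flic(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return cd.fd;
}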
793 routing->entries[0] = ue; in TEST_F()
794 routing->nr = 1; in TEST_F()
795 rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing); in TEST_F()
796 ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno); in TEST_F()
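The final hits build a single-entry KVM_SET_GSI_ROUTING table and expect it to be rejected on a ucontrol VM. A sketch of constructing such a flexible-array request; the routing type is an assumption chosen for illustration:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* one-entry routing table; expected to fail on a ucontrol VM */
static int set_one_gsi_route(int vm_fd)
{
	struct kvm_irq_routing_entry ue = {
		.gsi = 1,
		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
	};
	struct kvm_irq_routing *routing;
	int rc;

	routing = calloc(1, sizeof(*routing) + sizeof(ue));
	routing->nr = 1;
	routing->entries[0] = ue;

	rc = ioctl(vm_fd, KVM_SET_GSI_ROUTING, routing) ? -errno : 0;
	free(routing);
	return rc;
}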