Lines Matching +full:disable +full:- +full:mmu +full:- +full:reset
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
63 #include "mmu.h"
66 #include "reset.h"
136 * SCTLR_EL2_M: MMU on in arm_setup_vectors()
142 * ~SCTLR_EL2_EE: Data accesses are little-endian in arm_setup_vectors()
153 WRITE_SPECIALREG(vtcr_el2, el2_regs->vtcr_el2); in arm_setup_vectors()
168 vmm_call_hyp(vmmpmap_to_ttbr0(), stack_top, el2_regs->tcr_el2, in arm_setup_vectors()
169 sctlr_el2, el2_regs->vtcr_el2); in arm_setup_vectors()
181 * vmm_cleanup() will disable the MMU. For the next few instructions, in arm_teardown_vectors()
182 * before the hardware disables the MMU, one of the following is in arm_teardown_vectors()
185 * a. The instruction addresses are fetched with the MMU disabled, in arm_teardown_vectors()
284 /* Initialise the EL2 MMU */ in vmmops_modinit()
286 printf("vmm: Failed to init the EL2 MMU\n"); in vmmops_modinit()
300 * EL2. EL2 code is identity-mapped; the allocator is used to in vmmops_modinit()
307 hyp_code_len = round_page(&vmm_hyp_code_end - &vmm_hyp_code); in vmmops_modinit()
309 /* We need a physical identity mapping for when we activate the MMU */ in vmmops_modinit()
317 /* Create a per-CPU hypervisor stack */ in vmmops_modinit()
334 el2_regs.tcr_el2 |= TCR_EL2_T0SZ(64 - EL2_VIRT_BITS); in vmmops_modinit()
387 el2_regs.vtcr_el2 |= VTCR_EL2_T0SZ(64 - vmm_virt_bits); in vmmops_modinit()
424 vmm_base -= L2_SIZE + PAGE_SIZE; in vmmops_modinit()
434 vmem_add(el2_mem_alloc, L2_SIZE, vmm_base - L2_SIZE, in vmmops_modinit()
443 if (next_hyp_va < HYP_VM_MAX_ADDRESS - PAGE_SIZE) in vmmops_modinit()
445 HYP_VM_MAX_ADDRESS - next_hyp_va, M_WAITOK); in vmmops_modinit()
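The two T0SZ assignments above (tcr_el2 and vtcr_el2) encode an address-space size as 64 minus the number of input-address bits, so a wider address space is programmed as a smaller T0SZ. A minimal worked check, assuming an illustrative 48-bit EL2 virtual address space; the EXAMPLE_* names are local to this sketch, not values taken from the file:

#define EXAMPLE_EL2_VIRT_BITS	48	/* assumed VA width for the example */
#define EXAMPLE_T0SZ		(64 - EXAMPLE_EL2_VIRT_BITS)

/* A 48-bit address space is programmed as T0SZ = 16. */
_Static_assert(EXAMPLE_T0SZ == 16, "T0SZ encodes 64 minus the VA width");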
524 hyp->vm = vm; in vmmops_init()
525 hyp->vgic_attached = false; in vmmops_init()
531 hyp->el2_addr = el2_map_enter((vm_offset_t)hyp, size, in vmmops_init()
547 KASSERT(vcpuid >= 0 && vcpuid < vm_get_maxcpus(hyp->vm), in vmmops_vcpu_init()
549 hyp->ctx[vcpuid] = hypctx; in vmmops_vcpu_init()
551 hypctx->hyp = hyp; in vmmops_vcpu_init()
552 hypctx->vcpu = vcpu1; in vmmops_vcpu_init()
561 hypctx->el2_addr = el2_map_enter((vm_offset_t)hypctx, size, in vmmops_vcpu_init()
592 printf("esr_el2: 0x%016lx\n", vme->u.hyp.esr_el2); in arm64_print_hyp_regs()
593 printf("far_el2: 0x%016lx\n", vme->u.hyp.far_el2); in arm64_print_hyp_regs()
594 printf("hpfar_el2: 0x%016lx\n", vme->u.hyp.hpfar_el2); in arm64_print_hyp_regs()
595 printf("elr_el2: 0x%016lx\n", vme->pc); in arm64_print_hyp_regs()
609 vme_ret->u.inst_emul.gpa = in arm64_gen_inst_emul_data()
610 HPFAR_EL2_FIPA_ADDR(hypctx->exit_info.hpfar_el2); in arm64_gen_inst_emul_data()
612 vme_ret->u.inst_emul.gpa += hypctx->exit_info.far_el2 & in arm64_gen_inst_emul_data()
618 vie = &vme_ret->u.inst_emul.vie; in arm64_gen_inst_emul_data()
619 vie->access_size = 1 << esr_sas; in arm64_gen_inst_emul_data()
620 vie->sign_extend = (esr_iss & ISS_DATA_SSE) ? 1 : 0; in arm64_gen_inst_emul_data()
621 vie->dir = (esr_iss & ISS_DATA_WnR) ? VM_DIR_WRITE : VM_DIR_READ; in arm64_gen_inst_emul_data()
622 vie->reg = reg_num; in arm64_gen_inst_emul_data()
624 paging = &vme_ret->u.inst_emul.paging; in arm64_gen_inst_emul_data()
625 paging->ttbr0_addr = hypctx->ttbr0_el1 & ~(TTBR_ASID_MASK | TTBR_CnP); in arm64_gen_inst_emul_data()
626 paging->ttbr1_addr = hypctx->ttbr1_el1 & ~(TTBR_ASID_MASK | TTBR_CnP); in arm64_gen_inst_emul_data()
627 paging->tcr_el1 = hypctx->tcr_el1; in arm64_gen_inst_emul_data()
628 paging->tcr2_el1 = hypctx->tcr2_el1; in arm64_gen_inst_emul_data()
629 paging->flags = hypctx->tf.tf_spsr & (PSR_M_MASK | PSR_M_32); in arm64_gen_inst_emul_data()
630 if ((hypctx->sctlr_el1 & SCTLR_M) != 0) in arm64_gen_inst_emul_data()
631 paging->flags |= VM_GP_MMU_ENABLED; in arm64_gen_inst_emul_data()
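In the inst_emul path above, the guest-physical fault address is rebuilt from two registers: HPFAR_EL2 supplies the intermediate physical address of the faulting page and FAR_EL2 supplies the byte offset within that page. A minimal sketch of that reconstruction, assuming a 4 KiB granule and the architectural HPFAR_EL2.FIPA layout; the helper name and mask below are illustrative, not taken from the file:

#include <stdint.h>

/* Illustrative only: rebuild the faulting GPA from HPFAR_EL2 and FAR_EL2. */
static inline uint64_t
example_fault_gpa(uint64_t hpfar_el2, uint64_t far_el2)
{
	uint64_t gpa;

	/* HPFAR_EL2.FIPA (bits [43:4]) holds the faulting IPA >> 12. */
	gpa = ((hpfar_el2 >> 4) & 0xffffffffffUL) << 12;
	/* FAR_EL2 contributes the byte offset within the page. */
	gpa |= far_el2 & (4096 - 1);
	return (gpa);
}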
641 vre = &vme_ret->u.reg_emul.vre; in arm64_gen_reg_emul_data()
643 vre->inst_syndrome = esr_iss; in arm64_gen_reg_emul_data()
644 /* ARMv8 Architecture Manual, p. D7-2273: 1 means read */ in arm64_gen_reg_emul_data()
645 vre->dir = (esr_iss & ISS_MSR_DIR) ? VM_DIR_READ : VM_DIR_WRITE; in arm64_gen_reg_emul_data()
647 vre->reg = reg_num; in arm64_gen_reg_emul_data()
655 if ((hypctx->tf.tf_spsr & PSR_M_MASK) == PSR_M_EL0t) in raise_data_insn_abort()
659 /* Set the bit that changes from insn -> data abort */ in raise_data_insn_abort()
663 esr |= hypctx->tf.tf_esr & ESR_ELx_IL; in raise_data_insn_abort()
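The "bit that changes from insn -> data abort" comment refers to the ESR_ELx exception-class encoding: instruction aborts and data aborts taken from the same exception level differ only in EC bit 2, e.g. EC 0x20/0x21 for instruction aborts versus 0x24/0x25 for data aborts. A one-line check of that relationship (the literals restate the architectural EC codes for illustration):

/* Setting EC bit 2 turns the instruction-abort class into the data-abort class. */
_Static_assert((0x20 | 0x04) == 0x24 && (0x21 | 0x04) == 0x25,
    "data-abort EC is the insn-abort EC with bit 2 set");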
675 esr_ec = ESR_ELx_EXCEPTION(hypctx->tf.tf_esr); in handle_el1_sync_excp()
676 esr_iss = hypctx->tf.tf_esr & ESR_ELx_ISS_MASK; in handle_el1_sync_excp()
680 vmm_stat_incr(hypctx->vcpu, VMEXIT_UNKNOWN, 1); in handle_el1_sync_excp()
682 vme_ret->exitcode = VM_EXITCODE_HYP; in handle_el1_sync_excp()
685 if ((hypctx->tf.tf_esr & 0x3) == 0) { /* WFI */ in handle_el1_sync_excp()
686 vmm_stat_incr(hypctx->vcpu, VMEXIT_WFI, 1); in handle_el1_sync_excp()
687 vme_ret->exitcode = VM_EXITCODE_WFI; in handle_el1_sync_excp()
689 vmm_stat_incr(hypctx->vcpu, VMEXIT_WFE, 1); in handle_el1_sync_excp()
690 vme_ret->exitcode = VM_EXITCODE_HYP; in handle_el1_sync_excp()
694 vmm_stat_incr(hypctx->vcpu, VMEXIT_HVC, 1); in handle_el1_sync_excp()
695 vme_ret->exitcode = VM_EXITCODE_HVC; in handle_el1_sync_excp()
698 vmm_stat_incr(hypctx->vcpu, VMEXIT_MSR, 1); in handle_el1_sync_excp()
700 vme_ret->exitcode = VM_EXITCODE_REG_EMUL; in handle_el1_sync_excp()
703 vmm_stat_incr(hypctx->vcpu, VMEXIT_BRK, 1); in handle_el1_sync_excp()
704 vme_ret->exitcode = VM_EXITCODE_BRK; in handle_el1_sync_excp()
707 vmm_stat_incr(hypctx->vcpu, VMEXIT_SS, 1); in handle_el1_sync_excp()
708 vme_ret->exitcode = VM_EXITCODE_SS; in handle_el1_sync_excp()
712 vmm_stat_incr(hypctx->vcpu, esr_ec == EXCP_DATA_ABORT_L ? in handle_el1_sync_excp()
714 switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) { in handle_el1_sync_excp()
725 gpa = HPFAR_EL2_FIPA_ADDR(hypctx->exit_info.hpfar_el2); in handle_el1_sync_excp()
729 hypctx->exit_info.far_el2, in handle_el1_sync_excp()
732 vme_ret->inst_length = 0; in handle_el1_sync_excp()
736 if (vm_mem_allocated(hypctx->vcpu, gpa)) { in handle_el1_sync_excp()
737 vme_ret->exitcode = VM_EXITCODE_PAGING; in handle_el1_sync_excp()
738 vme_ret->inst_length = 0; in handle_el1_sync_excp()
739 vme_ret->u.paging.esr = hypctx->tf.tf_esr; in handle_el1_sync_excp()
740 vme_ret->u.paging.gpa = gpa; in handle_el1_sync_excp()
747 hypctx->exit_info.far_el2, false, in handle_el1_sync_excp()
749 vme_ret->inst_length = 0; in handle_el1_sync_excp()
754 vme_ret->exitcode = VM_EXITCODE_INST_EMUL; in handle_el1_sync_excp()
759 vme_ret->exitcode = VM_EXITCODE_HYP; in handle_el1_sync_excp()
766 vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED_SYNC, 1); in handle_el1_sync_excp()
768 vme_ret->exitcode = VM_EXITCODE_HYP; in handle_el1_sync_excp()
791 vmm_stat_incr(hypctx->vcpu, in arm64_handle_world_switch()
793 vme->exitcode = VM_EXITCODE_BOGUS; in arm64_handle_world_switch()
802 vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED_EL2, 1); in arm64_handle_world_switch()
803 vme->exitcode = VM_EXITCODE_BOGUS; in arm64_handle_world_switch()
808 vmm_stat_incr(hypctx->vcpu, VMEXIT_UNHANDLED, 1); in arm64_handle_world_switch()
809 vme->exitcode = VM_EXITCODE_BOGUS; in arm64_handle_world_switch()
848 /* Check if the MMU is off */ in vmmops_gla2gpa()
849 if ((paging->flags & VM_GP_MMU_ENABLED) == 0) { in vmmops_gla2gpa()
855 is_el0 = (paging->flags & PSR_M_MASK) == PSR_M_EL0t; in vmmops_gla2gpa()
859 if ((paging->tcr_el1 & TCR_EPD1) != 0) { in vmmops_gla2gpa()
863 if (is_el0 && (paging->tcr_el1 & TCR_E0PD1) != 0) { in vmmops_gla2gpa()
867 pte_addr = paging->ttbr1_addr; in vmmops_gla2gpa()
868 tsz = (paging->tcr_el1 & TCR_T1SZ_MASK) >> TCR_T1SZ_SHIFT; in vmmops_gla2gpa()
870 if ((paging->tcr_el1 & TCR_TBI1) != 0) in vmmops_gla2gpa()
872 switch (paging->tcr_el1 & TCR_TG1_MASK) { in vmmops_gla2gpa()
888 if ((paging->tcr_el1 & TCR_EPD0) != 0) { in vmmops_gla2gpa()
892 if (is_el0 && (paging->tcr_el1 & TCR_E0PD0) != 0) { in vmmops_gla2gpa()
896 pte_addr = paging->ttbr0_addr; in vmmops_gla2gpa()
897 tsz = (paging->tcr_el1 & TCR_T0SZ_MASK) >> TCR_T0SZ_SHIFT; in vmmops_gla2gpa()
899 if ((paging->tcr_el1 & TCR_TBI0) != 0) in vmmops_gla2gpa()
901 switch (paging->tcr_el1 & TCR_TG0_MASK) { in vmmops_gla2gpa()
925 * See "Table D8-11 4KB granule, determining stage 1 initial in vmmops_gla2gpa()
926 * lookup level" and "Table D8-21 16KB granule, determining in vmmops_gla2gpa()
928 * Reference Manual for A-Profile architecture" revision I.a in vmmops_gla2gpa()
951 ia_bits = 64 - tsz; in vmmops_gla2gpa()
958 address_bits = ia_bits - granule_shift; in vmmops_gla2gpa()
962 * granule_shift - PTE_SHIFT bits of the input address. in vmmops_gla2gpa()
966 levels = howmany(address_bits, granule_shift - PTE_SHIFT); in vmmops_gla2gpa()
969 gla &= (1ul << ia_bits) - 1; in vmmops_gla2gpa()
973 for (; levels > 0; levels--) { in vmmops_gla2gpa()
976 pte_shift = (levels - 1) * (granule_shift - PTE_SHIFT) + in vmmops_gla2gpa()
979 ((1ul << (granule_shift - PTE_SHIFT)) - 1); in vmmops_gla2gpa()
981 idx -= PAGE_SIZE / sizeof(pte); in vmmops_gla2gpa()
985 ptep = ptp_hold(hypctx->vcpu, pte_addr, PAGE_SIZE, &cookie); in vmmops_gla2gpa()
994 /* TODO: Level -1 when FEAT_LPA2 is implemented */ in vmmops_gla2gpa()
1046 mask = (1ul << pte_shift) - 1; in vmmops_gla2gpa()
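The level count above follows directly from the arithmetic in the walk setup: each level resolves granule_shift - PTE_SHIFT bits of the input address. A worked example, assuming a 4 KiB granule (granule_shift = 12), 8-byte descriptors (PTE_SHIFT = 3, so 9 index bits per level) and T0SZ = 16; the EX_* names are local to this sketch:

#define EX_TSZ			16
#define EX_GRANULE_SHIFT	12
#define EX_PTE_SHIFT		3
#define EX_IA_BITS		(64 - EX_TSZ)			/* 48-bit input addresses */
#define EX_ADDRESS_BITS		(EX_IA_BITS - EX_GRANULE_SHIFT)	/* 36 bits left to resolve */
#define EX_BITS_PER_LEVEL	(EX_GRANULE_SHIFT - EX_PTE_SHIFT)	/* 9 bits per level */

/* howmany(36, 9) == 4: a four-level walk, matching Table D8-11. */
_Static_assert((EX_ADDRESS_BITS + EX_BITS_PER_LEVEL - 1) / EX_BITS_PER_LEVEL == 4,
    "a 48-bit VA with a 4 KiB granule needs a 4-level walk");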
1074 hyp = hypctx->hyp; in vmmops_run()
1075 vcpu = hypctx->vcpu; in vmmops_run()
1078 hypctx->tf.tf_elr = (uint64_t)pc; in vmmops_run()
1081 if (hypctx->has_exception) { in vmmops_run()
1082 hypctx->has_exception = false; in vmmops_run()
1083 hypctx->elr_el1 = hypctx->tf.tf_elr; in vmmops_run()
1085 mode = hypctx->tf.tf_spsr & (PSR_M_MASK | PSR_M_32); in vmmops_run()
1088 hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x0; in vmmops_run()
1090 hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x200; in vmmops_run()
1092 /* 64-bit EL0 */ in vmmops_run()
1093 hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x400; in vmmops_run()
1095 /* 32-bit EL0 */ in vmmops_run()
1096 hypctx->tf.tf_elr = hypctx->vbar_el1 + 0x600; in vmmops_run()
1100 hypctx->spsr_el1 = hypctx->tf.tf_spsr; in vmmops_run()
1103 hypctx->tf.tf_spsr = hypctx->spsr_el1 & PSR_FLAGS; in vmmops_run()
1104 hypctx->tf.tf_spsr |= PSR_DAIF | PSR_M_EL1h; in vmmops_run()
1110 if ((hypctx->sctlr_el1 & SCTLR_SPAN) == 0) in vmmops_run()
1111 hypctx->tf.tf_spsr |= PSR_PAN; in vmmops_run()
1112 if ((hypctx->sctlr_el1 & SCTLR_DSSBS) == 0) in vmmops_run()
1113 hypctx->tf.tf_spsr &= ~PSR_SSBS; in vmmops_run()
1115 hypctx->tf.tf_spsr |= PSR_SSBS; in vmmops_run()
1135 hyp->vttbr_el2 = pmap_to_ttbr0(pmap); in vmmops_run()
1160 vme->pc = hypctx->tf.tf_elr; in vmmops_run()
1161 vme->inst_length = INSN_SIZE; in vmmops_run()
1162 vme->u.hyp.exception_nr = excp_type; in vmmops_run()
1163 vme->u.hyp.esr_el2 = hypctx->tf.tf_esr; in vmmops_run()
1164 vme->u.hyp.far_el2 = hypctx->exit_info.far_el2; in vmmops_run()
1165 vme->u.hyp.hpfar_el2 = hypctx->exit_info.hpfar_el2; in vmmops_run()
1174 hypctx->tf.tf_elr += vme->inst_length; in vmmops_run()
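The exception-injection path above redirects the guest to one of the four architectural VBAR_EL1 slots for a synchronous exception, chosen by the mode the guest was running in. A compact sketch of that selection, assuming the PSR_M_* constants from the arm64 machine headers; the helper itself is illustrative, not part of the file:

#include <sys/types.h>
#include <machine/armreg.h>	/* PSR_M_MASK, PSR_M_32, PSR_M_EL0t, PSR_M_EL1t, PSR_M_EL1h */

/* Illustrative only: pick the VBAR_EL1 offset for a synchronous exception. */
static inline uint64_t
example_sync_vector_offset(uint64_t spsr)
{
	switch (spsr & (PSR_M_MASK | PSR_M_32)) {
	case PSR_M_EL1t:
		return (0x0);	/* taken from EL1 using SP_EL0 */
	case PSR_M_EL1h:
		return (0x200);	/* taken from EL1 using SP_EL1 */
	case PSR_M_EL0t:
		return (0x400);	/* taken from 64-bit EL0 */
	default:
		return (0x600);	/* taken from 32-bit EL0 */
	}
}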
1187 maxcpus = vm_get_maxcpus(hyp->vm); in arm_pcpu_vmcleanup()
1189 if (arm64_get_active_vcpu() == hyp->ctx[i]) { in arm_pcpu_vmcleanup()
1205 vmmpmap_remove(hypctx->el2_addr, el2_hypctx_size(), true); in vmmops_vcpu_cleanup()
1221 vmmpmap_remove(hyp->el2_addr, el2_hyp_size(hyp->vm), true); in vmmops_cleanup()
1235 return (&hypctx->tf.tf_x[reg]); in hypctx_regptr()
1237 return (&hypctx->tf.tf_lr); in hypctx_regptr()
1239 return (&hypctx->tf.tf_sp); in hypctx_regptr()
1241 return (&hypctx->tf.tf_spsr); in hypctx_regptr()
1243 return (&hypctx->tf.tf_elr); in hypctx_regptr()
1245 return (&hypctx->sctlr_el1); in hypctx_regptr()
1247 return (&hypctx->ttbr0_el1); in hypctx_regptr()
1249 return (&hypctx->ttbr1_el1); in hypctx_regptr()
1251 return (&hypctx->tcr_el1); in hypctx_regptr()
1253 return (&hypctx->tcr2_el1); in hypctx_regptr()
1267 running = vcpu_is_running(hypctx->vcpu, &hostcpu); in vmmops_getreg()
1269 panic("arm_getreg: %s%d is running", vm_name(hypctx->hyp->vm), in vmmops_getreg()
1270 vcpu_vcpuid(hypctx->vcpu)); in vmmops_getreg()
1287 running = vcpu_is_running(hypctx->vcpu, &hostcpu); in vmmops_setreg()
1289 panic("arm_setreg: %s%d is running", vm_name(hypctx->hyp->vm), in vmmops_setreg()
1290 vcpu_vcpuid(hypctx->vcpu)); in vmmops_setreg()
1306 running = vcpu_is_running(hypctx->vcpu, &hostcpu); in vmmops_exception()
1308 panic("%s: %s%d is running", __func__, vm_name(hypctx->hyp->vm), in vmmops_exception()
1309 vcpu_vcpuid(hypctx->vcpu)); in vmmops_exception()
1311 hypctx->far_el1 = far; in vmmops_exception()
1312 hypctx->esr_el1 = esr; in vmmops_exception()
1313 hypctx->has_exception = true; in vmmops_exception()
1334 *retval = (hypctx->setcaps & (1ul << num)) != 0; in vmmops_getcap()
1353 if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) in vmmops_setcap()
1356 hypctx->mdcr_el2 |= MDCR_EL2_TDE; in vmmops_setcap()
1358 hypctx->mdcr_el2 &= ~MDCR_EL2_TDE; in vmmops_setcap()
1361 if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) in vmmops_setcap()
1365 hypctx->debug_spsr |= (hypctx->tf.tf_spsr & PSR_SS); in vmmops_setcap()
1366 hypctx->debug_mdscr |= hypctx->mdscr_el1 & in vmmops_setcap()
1369 hypctx->tf.tf_spsr |= PSR_SS; in vmmops_setcap()
1370 hypctx->mdscr_el1 |= MDSCR_SS | MDSCR_KDE; in vmmops_setcap()
1371 hypctx->mdcr_el2 |= MDCR_EL2_TDE; in vmmops_setcap()
1373 hypctx->tf.tf_spsr &= ~PSR_SS; in vmmops_setcap()
1374 hypctx->tf.tf_spsr |= hypctx->debug_spsr; in vmmops_setcap()
1375 hypctx->debug_spsr &= ~PSR_SS; in vmmops_setcap()
1376 hypctx->mdscr_el1 &= ~(MDSCR_SS | MDSCR_KDE); in vmmops_setcap()
1377 hypctx->mdscr_el1 |= hypctx->debug_mdscr; in vmmops_setcap()
1378 hypctx->debug_mdscr &= ~(MDSCR_SS | MDSCR_KDE); in vmmops_setcap()
1379 hypctx->mdcr_el2 &= ~MDCR_EL2_TDE; in vmmops_setcap()
1383 if ((val != 0) == ((hypctx->setcaps & (1ul << num)) != 0)) in vmmops_setcap()
1387 hypctx->debug_spsr |= (hypctx->tf.tf_spsr & in vmmops_setcap()
1389 hypctx->tf.tf_spsr |= PSR_I | PSR_F; in vmmops_setcap()
1391 hypctx->tf.tf_spsr &= ~(PSR_I | PSR_F); in vmmops_setcap()
1392 hypctx->tf.tf_spsr |= (hypctx->debug_spsr & in vmmops_setcap()
1394 hypctx->debug_spsr &= ~(PSR_I | PSR_F); in vmmops_setcap()
1404 hypctx->setcaps &= ~(1ul << num); in vmmops_setcap()
1406 hypctx->setcaps |= (1ul << num); in vmmops_setcap()
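Each capability is tracked as one bit in setcaps, and every case above starts with the same guard: if the requested value already matches the tracked bit, the call is a no-op. A small sketch of that guard, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: true when capability 'num' is already in state 'val'. */
static inline bool
example_cap_unchanged(uint64_t setcaps, int num, int val)
{
	return ((val != 0) == ((setcaps & (1ul << num)) != 0));
}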