Lines Matching +full:fault-inject

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
93 static_assert(sizeof(struct tss32) == 104, "struct tss32 must be 104 bytes");
125 if (usd->sd_gran) in usd_to_seg_desc()
129 seg_desc.access = usd->sd_type | usd->sd_dpl << 5 | usd->sd_p << 7; in usd_to_seg_desc()
130 seg_desc.access |= usd->sd_xx << 12; in usd_to_seg_desc()
131 seg_desc.access |= usd->sd_def32 << 14; in usd_to_seg_desc()
132 seg_desc.access |= usd->sd_gran << 15; in usd_to_seg_desc()
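The access word assembled above packs the architectural segment-attribute bits into the layout the hypervisor expects. A minimal sketch of the inverse mapping, using FreeBSD's struct user_segment_descriptor field names (the helper itself is illustrative, not part of this file):

    /* Decode the 'access' word built in usd_to_seg_desc(): bits 4:0
     * S+type, bits 6:5 DPL, bit 7 present, bit 12 AVL, bit 14 D/B,
     * bit 15 G. Bit 13 (L) is irrelevant here: hardware task switches
     * do not exist in 64-bit mode. */
    static void
    seg_access_decode(uint32_t access, struct user_segment_descriptor *usd)
    {
            usd->sd_type  = access & 0x1f;
            usd->sd_dpl   = (access >> 5) & 0x3;
            usd->sd_p     = (access >> 7) & 0x1;
            usd->sd_xx    = (access >> 12) & 0x1;
            usd->sd_def32 = (access >> 14) & 0x1;
            usd->sd_gran  = (access >> 15) & 0x1;
    }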
138 * Inject an exception with an error code that is a segment selector.
154 * Bit 2 from the selector is retained as-is in the error code. in sel_exception()
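The error code pushed for these exceptions follows the standard x86 selector error-code format. A sketch of the layout described by the comment above (bit values per the Intel SDM; the helper is illustrative):

    /* bit 0 EXT (event external to the program), bit 1 IDT (selector
     * references the IDT; always 0 here), bit 2 TI (copied unchanged
     * from the selector: GDT vs. LDT), bits 15:3 selector index. */
    static uint32_t
    sel_errcode(uint16_t sel, int ext)
    {
            uint32_t errcode;

            errcode = sel & ~0x3;   /* keep index and TI, drop RPL */
            if (ext)
                    errcode |= 0x1; /* EXT */
            return (errcode);
    }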
170 * and non-zero otherwise.
185 return (-1); in desc_table_limit_check()
189 return (-1); in desc_table_limit_check()
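The limit check itself reduces to simple arithmetic: every descriptor is 8 bytes, so a selector is out of bounds when the descriptor it indexes would extend past the table limit. A sketch of the core test, assuming 'limit' was read from the appropriate GDTR/LDTR limit field:

    /* sel & ~0x7 clears RPL and TI, leaving the byte offset of the
     * descriptor within the table; its last byte must still fall
     * within the limit. */
    if ((sel & ~0x7) + 7 > limit)
            return (-1);
    return (0);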
200 * Returns -1 otherwise.
249 * Returns -1 otherwise.
263 if (ts->reason == TSR_IRET) in read_tss_descriptor()
264 sel_exception(vcpu, IDT_TS, sel, ts->ext); in read_tss_descriptor()
266 sel_exception(vcpu, IDT_GP, sel, ts->ext); in read_tss_descriptor()
270 sup_paging = ts->paging; in read_tss_descriptor()
344 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
350 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
358 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
361 seg_desc->base = 0; in validate_seg_desc()
362 seg_desc->limit = 0; in validate_seg_desc()
363 seg_desc->access = 0x10000; /* unusable */ in validate_seg_desc()
368 sup_paging = ts->paging; in validate_seg_desc()
379 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
391 sel_exception(vcpu, idtvec, sel, ts->ext); in validate_seg_desc()
401 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
409 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
416 * A data segment is always non-conforming except when it's in validate_seg_desc()
425 sel_exception(vcpu, IDT_TS, sel, ts->ext); in validate_seg_desc()
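The privilege test implied by these fault paths follows the SDM rules for loading data segments during a task switch. A minimal sketch under that reading (variable names are illustrative; 'conforming' is true only for conforming code segments):

    /* For data segments and non-conforming code segments, DPL must be
     * numerically >= both CPL and the selector's RPL; otherwise raise
     * #TS with the offending selector and unwind with 'fault' set. */
    if (!conforming && (dpl < cpl || dpl < rpl))
            sel_exception(vcpu, IDT_TS, sel, ts->ext);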
439 tss->tss_eax = GETREG(vcpu, VM_REG_GUEST_RAX); in tss32_save()
440 tss->tss_ecx = GETREG(vcpu, VM_REG_GUEST_RCX); in tss32_save()
441 tss->tss_edx = GETREG(vcpu, VM_REG_GUEST_RDX); in tss32_save()
442 tss->tss_ebx = GETREG(vcpu, VM_REG_GUEST_RBX); in tss32_save()
443 tss->tss_esp = GETREG(vcpu, VM_REG_GUEST_RSP); in tss32_save()
444 tss->tss_ebp = GETREG(vcpu, VM_REG_GUEST_RBP); in tss32_save()
445 tss->tss_esi = GETREG(vcpu, VM_REG_GUEST_RSI); in tss32_save()
446 tss->tss_edi = GETREG(vcpu, VM_REG_GUEST_RDI); in tss32_save()
449 tss->tss_es = GETREG(vcpu, VM_REG_GUEST_ES); in tss32_save()
450 tss->tss_cs = GETREG(vcpu, VM_REG_GUEST_CS); in tss32_save()
451 tss->tss_ss = GETREG(vcpu, VM_REG_GUEST_SS); in tss32_save()
452 tss->tss_ds = GETREG(vcpu, VM_REG_GUEST_DS); in tss32_save()
453 tss->tss_fs = GETREG(vcpu, VM_REG_GUEST_FS); in tss32_save()
454 tss->tss_gs = GETREG(vcpu, VM_REG_GUEST_GS); in tss32_save()
457 tss->tss_eflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS); in tss32_save()
458 if (task_switch->reason == TSR_IRET) in tss32_save()
459 tss->tss_eflags &= ~PSL_NT; in tss32_save()
460 tss->tss_eip = eip; in tss32_save()
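GETREG()/SETREG() are presumably thin wrappers over libvmmapi's vm_get_register()/vm_set_register() that abort on any error, which keeps the save/restore sequences above readable. A minimal sketch under that assumption:

    static uint64_t
    GETREG(struct vcpu *vcpu, int reg)
    {
            uint64_t val;
            int error;

            error = vm_get_register(vcpu, reg, &val);
            assert(error == 0);
            return (val);
    }

    static void
    SETREG(struct vcpu *vcpu, int reg, uint64_t val)
    {
            int error;

            error = vm_set_register(vcpu, reg, val);
            assert(error == 0);
    }

Note also the PSL_NT handling just above: on an IRET-initiated switch the NT flag is cleared in the EFLAGS image saved to the old TSS, matching the architectural behavior of a nested-task return.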
471 error = vm_set_desc(vcpu, reg, sd->base, sd->limit, sd->access); in update_seg_desc()
489 if (ts->reason != TSR_IRET && ts->reason != TSR_JMP) { in tss32_restore()
490 tss->tss_link = ot_sel; in tss32_restore()
494 eflags = tss->tss_eflags; in tss32_restore()
499 SETREG(vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt); in tss32_restore()
502 if (ts->paging.paging_mode != PAGING_MODE_FLAT) { in tss32_restore()
503 if (ts->paging.paging_mode == PAGING_MODE_PAE) { in tss32_restore()
505 * XXX Assuming 36-bit MAXPHYADDR. in tss32_restore()
507 maxphyaddr = (1UL << 36) - 1; in tss32_restore()
508 pdpte = paddr_guest2host(ctx, tss->tss_cr3 & ~0x1f, 32); in tss32_restore()
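In PAE mode a write to CR3 reloads the four page-directory-pointer entries from the 32-byte-aligned block that CR3 points at; the mapping above makes that block visible host-side. The reload that presumably follows is sketched here (the VM_REG_GUEST_PDPTE* register names are from vmm.h; the exact loop is an assumption):

    /* Hand the four (possibly not-present) PDPTEs to the hypervisor so
     * it can apply the architectural present/reserved-bit checks on
     * the load. */
    for (i = 0; i < 4; i++)
            SETREG(vcpu, VM_REG_GUEST_PDPTE0 + i, pdpte[i]);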
528 SETREG(vcpu, VM_REG_GUEST_CR3, tss->tss_cr3); in tss32_restore()
529 ts->paging.cr3 = tss->tss_cr3; in tss32_restore()
534 SETREG(vcpu, VM_REG_GUEST_RIP, tss->tss_eip); in tss32_restore()
537 SETREG(vcpu, VM_REG_GUEST_RAX, tss->tss_eax); in tss32_restore()
538 SETREG(vcpu, VM_REG_GUEST_RCX, tss->tss_ecx); in tss32_restore()
539 SETREG(vcpu, VM_REG_GUEST_RDX, tss->tss_edx); in tss32_restore()
540 SETREG(vcpu, VM_REG_GUEST_RBX, tss->tss_ebx); in tss32_restore()
541 SETREG(vcpu, VM_REG_GUEST_RSP, tss->tss_esp); in tss32_restore()
542 SETREG(vcpu, VM_REG_GUEST_RBP, tss->tss_ebp); in tss32_restore()
543 SETREG(vcpu, VM_REG_GUEST_RSI, tss->tss_esi); in tss32_restore()
544 SETREG(vcpu, VM_REG_GUEST_RDI, tss->tss_edi); in tss32_restore()
547 SETREG(vcpu, VM_REG_GUEST_ES, tss->tss_es); in tss32_restore()
548 SETREG(vcpu, VM_REG_GUEST_CS, tss->tss_cs); in tss32_restore()
549 SETREG(vcpu, VM_REG_GUEST_SS, tss->tss_ss); in tss32_restore()
550 SETREG(vcpu, VM_REG_GUEST_DS, tss->tss_ds); in tss32_restore()
551 SETREG(vcpu, VM_REG_GUEST_FS, tss->tss_fs); in tss32_restore()
552 SETREG(vcpu, VM_REG_GUEST_GS, tss->tss_gs); in tss32_restore()
571 * The SS and CS attribute checks on VM-entry are inter-dependent so in tss32_restore()
574 * VM-entry checks so the guest can handle any exception injected in tss32_restore()
588 ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK; in tss32_restore()
655 * stack-segment descriptor determines the size of the stack in push_errcode()
656 * pointer outside of 64-bit mode. in push_errcode()
664 esp -= bytes; in push_errcode()
666 if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, in push_errcode()
673 if (vie_alignment_check(paging->cpl, bytes, cr0, rflags, gla)) { in push_errcode()
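vie_alignment_check() encapsulates the #AC condition. Per the SDM, alignment checking applies only at CPL 3 with both CR0.AM and RFLAGS.AC set; a sketch of an equivalent predicate (the helper below is illustrative, not the library routine):

    static int
    alignment_fault(int cpl, int size, uint64_t cr0, uint64_t rflags,
        uint64_t gla)
    {
            /* No alignment checking outside CPL 3, or when either
             * CR0.AM or RFLAGS.AC is clear. */
            if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rflags & PSL_AC) == 0)
                    return (0);
            return ((gla & (size - 1)) != 0);
    }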
693 #define CHKERR(error,fault) \
698 else if (fault) \
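Only two lines of CHKERR() match the search. Its full shape is presumably the usual pattern for these emulation helpers: abort the VM on a hard error, and return to the run loop when a fault has already been injected into the guest (a reconstruction, not verbatim):

    #define CHKERR(error, fault)                            \
            do {                                            \
                    if (error)                              \
                            return (VMEXIT_ABORT);          \
                    else if (fault)                         \
                            return (VMEXIT_CONTINUE);       \
            } while (0)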
716 int error, ext, fault, minlimit, nt_type, ot_type; in vmexit_task_switch()
720 vmexit = vmrun->vm_exit; in vmexit_task_switch()
721 task_switch = &vmexit->u.task_switch; in vmexit_task_switch()
722 nt_sel = task_switch->tsssel; in vmexit_task_switch()
723 ext = vmexit->u.task_switch.ext; in vmexit_task_switch()
724 reason = vmexit->u.task_switch.reason; in vmexit_task_switch()
725 paging = &vmexit->u.task_switch.paging; in vmexit_task_switch()
727 assert(paging->cpu_mode == CPU_MODE_PROTECTED); in vmexit_task_switch()
732 eip = vmexit->rip + vmexit->inst_length; in vmexit_task_switch()
737 * - accesses to GDT or LDT to load segment descriptors in vmexit_task_switch()
738 * - accesses to the task state segment during task switch in vmexit_task_switch()
745 &fault); in vmexit_task_switch()
746 CHKERR(error, fault); in vmexit_task_switch()
765 * TSS must have a minimum length of 104 bytes for a 32-bit TSS and in vmexit_task_switch()
766 * 44 bytes for a 16-bit TSS. in vmexit_task_switch()
769 minlimit = 104 - 1; in vmexit_task_switch()
771 minlimit = 44 - 1; in vmexit_task_switch()
798 PROT_READ | PROT_WRITE, nt_iov, nitems(nt_iov), &fault); in vmexit_task_switch()
799 CHKERR(error, fault); in vmexit_task_switch()
808 * TR would contain the values from power-on: in vmexit_task_switch()
811 sel_exception(vcpu, IDT_TS, ot_sel, task_switch->ext); in vmexit_task_switch()
825 &fault); in vmexit_task_switch()
826 CHKERR(error, fault); in vmexit_task_switch()
830 PROT_READ | PROT_WRITE, ot_iov, nitems(ot_iov), &fault); in vmexit_task_switch()
831 CHKERR(error, fault); in vmexit_task_switch()
841 &ot_desc, &fault); in vmexit_task_switch()
842 CHKERR(error, fault); in vmexit_task_switch()
846 EPRINTLN("Task switch to 16-bit TSS not supported"); in vmexit_task_switch()
860 &nt_desc, &fault); in vmexit_task_switch()
861 CHKERR(error, fault); in vmexit_task_switch()
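Once the new TSS descriptor has been read, its limit is presumably checked against the minimum computed earlier, raising #TS with the new selector when the TSS is too small (a sketch; field and return names follow the surrounding code):

    /* nt_desc.limit is the byte limit after granularity scaling; it
     * must cover at least minlimit (103 for a 32-bit TSS, 43 for a
     * 16-bit TSS). */
    if (nt_desc.limit < (unsigned int)minlimit) {
            sel_exception(vcpu, IDT_TS, nt_sel, ext);
            return (VMEXIT_CONTINUE);
    }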
885 &fault); in vmexit_task_switch()
886 CHKERR(error, fault); in vmexit_task_switch()
893 if (task_switch->errcode_valid) { in vmexit_task_switch()
894 assert(task_switch->ext); in vmexit_task_switch()
895 assert(task_switch->reason == TSR_IDT_GATE); in vmexit_task_switch()
896 error = push_errcode(vcpu, &task_switch->paging, nt_type, in vmexit_task_switch()
897 task_switch->errcode, &fault); in vmexit_task_switch()
898 CHKERR(error, fault); in vmexit_task_switch()
902 * Treatment of virtual-NMI blocking if NMI is delivered through in vmexit_task_switch()
906 * If the virtual NMIs VM-execution control is 1, VM entry injects in vmexit_task_switch()
908 * a VM exit, virtual-NMI blocking is in effect before the VM exit in vmexit_task_switch()
911 * Thus, virtual-NMI blocking is in effect at the time of the task in vmexit_task_switch()
916 * Treatment of virtual-NMI unblocking on IRET from NMI handler task. in vmexit_task_switch()
918 * Section "Changes to Instruction Behavior in VMX Non-Root Operation" in vmexit_task_switch()
919 * If the "virtual NMIs" control is 1, IRET removes any virtual-NMI blocking. in vmexit_task_switch()
920 * This unblocking of virtual-NMI occurs even if IRET causes a fault. in vmexit_task_switch()
922 * Thus, virtual-NMI blocking is cleared at the time of the task switch in vmexit_task_switch()
931 if (task_switch->reason == TSR_IDT_GATE) { in vmexit_task_switch()
937 * XXX should inject debug exception if 'T' bit is 1 in vmexit_task_switch()