Lines Matching full:vcpu
112 void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) in kvm_smm_changed() argument
114 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); in kvm_smm_changed()
117 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_smm_changed()
119 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); in kvm_smm_changed()
122 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
129 vcpu->arch.pdptrs_from_userspace = false; in kvm_smm_changed()
132 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
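The kvm_smm_changed() hits above (these matches appear to come from KVM's x86 SMM emulation, arch/x86/kvm/smm.c in recent kernels) toggle the vCPU's SMM bookkeeping in vcpu->arch.hflags, then request an event re-evaluation and reset the MMU context because the effective address space changes with SMM. A minimal user-space sketch of just the flag transitions, using placeholder bit values rather than the kernel's HF_* definitions:

    /* Sketch only: bit positions are placeholders, not the kernel's values. */
    #include <stdbool.h>
    #include <stdio.h>

    #define HF_SMM_MASK            (1u << 0)
    #define HF_SMM_INSIDE_NMI_MASK (1u << 1)

    static void smm_changed(unsigned int *hflags, bool entering_smm)
    {
            if (entering_smm)
                    *hflags |= HF_SMM_MASK;                /* SMI delivered */
            else
                    *hflags &= ~(HF_SMM_MASK |
                                 HF_SMM_INSIDE_NMI_MASK);  /* RSM executed  */
    }

    int main(void)
    {
            unsigned int hflags = 0;

            smm_changed(&hflags, true);
            printf("after SMI: hflags=%#x\n", hflags);
            smm_changed(&hflags, false);
            printf("after RSM: hflags=%#x\n", hflags);
            return 0;
    }
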
135 void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
137 vcpu->arch.smi_pending = true; in process_smi()
138 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_smi()
155 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, in enter_smm_save_seg_32() argument
161 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_32()
169 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, in enter_smm_save_seg_64() argument
175 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_64()
183 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, in enter_smm_save_state_32() argument
189 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_32()
190 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_32()
191 smram->eflags = kvm_get_rflags(vcpu); in enter_smm_save_state_32()
192 smram->eip = kvm_rip_read(vcpu); in enter_smm_save_state_32()
195 smram->gprs[i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_32()
197 smram->dr6 = (u32)vcpu->arch.dr6; in enter_smm_save_state_32()
198 smram->dr7 = (u32)vcpu->arch.dr7; in enter_smm_save_state_32()
200 enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); in enter_smm_save_state_32()
201 enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); in enter_smm_save_state_32()
203 kvm_x86_call(get_gdt)(vcpu, &dt); in enter_smm_save_state_32()
207 kvm_x86_call(get_idt)(vcpu, &dt); in enter_smm_save_state_32()
211 enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES); in enter_smm_save_state_32()
212 enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS); in enter_smm_save_state_32()
213 enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS); in enter_smm_save_state_32()
215 enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS); in enter_smm_save_state_32()
216 enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS); in enter_smm_save_state_32()
217 enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS); in enter_smm_save_state_32()
219 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_32()
221 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_32()
223 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_32()
227 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, in enter_smm_save_state_64() argument
234 smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_64()
236 smram->rip = kvm_rip_read(vcpu); in enter_smm_save_state_64()
237 smram->rflags = kvm_get_rflags(vcpu); in enter_smm_save_state_64()
239 smram->dr6 = vcpu->arch.dr6; in enter_smm_save_state_64()
240 smram->dr7 = vcpu->arch.dr7; in enter_smm_save_state_64()
242 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_64()
243 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_64()
244 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_64()
246 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_64()
249 smram->efer = vcpu->arch.efer; in enter_smm_save_state_64()
251 enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR); in enter_smm_save_state_64()
253 kvm_x86_call(get_idt)(vcpu, &dt); in enter_smm_save_state_64()
257 enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR); in enter_smm_save_state_64()
259 kvm_x86_call(get_gdt)(vcpu, &dt); in enter_smm_save_state_64()
263 enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES); in enter_smm_save_state_64()
264 enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS); in enter_smm_save_state_64()
265 enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS); in enter_smm_save_state_64()
266 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS); in enter_smm_save_state_64()
267 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS); in enter_smm_save_state_64()
268 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS); in enter_smm_save_state_64()
270 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_64()
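enter_smm_save_state_64() above dumps the live register state into the 64-bit SMRAM state-save image (the 32-bit variant starting at line 183 stores the analogous 32-bit fields). The following field inventory is a sketch reconstructed only from the stores visible above; it is not the kernel's packed struct kvm_smram_state_64, which has architecture-defined offsets and reserved gaps:

    /* Illustrative field inventory, not the real packed SMRAM layout. */
    #include <stdint.h>

    struct seg_sketch {                 /* cached segment: selector + hidden parts */
            uint16_t selector, attributes;
            uint32_t limit;
            uint64_t base;
    };

    struct smram64_sketch {
            uint64_t gprs[16];          /* stored reversed: gprs[15 - i] = GPR i   */
            uint64_t rip, rflags;
            uint64_t dr6, dr7;
            uint64_t cr0, cr3, cr4;
            uint32_t smbase;            /* current SMBASE                          */
            uint64_t efer;
            struct seg_sketch tr, ldtr; /* IDTR/GDTR base+limit come via get_idt()/get_gdt() */
            struct seg_sketch es, cs, ss, ds, fs, gs;
            uint8_t  int_shadow;        /* STI/MOV-SS interrupt shadow at SMI time */
    };
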
274 void enter_smm(struct kvm_vcpu *vcpu) in enter_smm() argument
286 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
287 enter_smm_save_state_64(vcpu, &smram.smram64); in enter_smm()
290 enter_smm_save_state_32(vcpu, &smram.smram32); in enter_smm()
293 * Give enter_smm() a chance to make ISA-specific changes to the vCPU in enter_smm()
300 if (kvm_x86_call(enter_smm)(vcpu, &smram)) in enter_smm()
303 kvm_smm_changed(vcpu, true); in enter_smm()
305 if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram))) in enter_smm()
308 if (kvm_x86_call(get_nmi_mask)(vcpu)) in enter_smm()
309 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
311 kvm_x86_call(set_nmi_mask)(vcpu, true); in enter_smm()
313 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in enter_smm()
314 kvm_rip_write(vcpu, 0x8000); in enter_smm()
316 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in enter_smm()
318 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
319 kvm_x86_call(set_cr0)(vcpu, cr0); in enter_smm()
321 kvm_x86_call(set_cr4)(vcpu, 0); in enter_smm()
325 kvm_x86_call(set_idt)(vcpu, &dt); in enter_smm()
327 if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1))) in enter_smm()
330 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
331 cs.base = vcpu->arch.smbase; in enter_smm()
348 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in enter_smm()
349 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in enter_smm()
350 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in enter_smm()
351 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in enter_smm()
352 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in enter_smm()
353 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in enter_smm()
356 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
357 if (kvm_x86_call(set_efer)(vcpu, 0)) in enter_smm()
361 kvm_update_cpuid_runtime(vcpu); in enter_smm()
362 kvm_mmu_reset_context(vcpu); in enter_smm()
365 kvm_vm_dead(vcpu->kvm); in enter_smm()
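enter_smm() stitches the sequence together: save the state image, let the vendor code adjust the vCPU, flag SMM, write the image at SMBASE + 0xFE00, mask NMIs, and restart execution at CS.base = SMBASE with RIP = 0x8000 in a real-mode-like flat configuration (CR0.PE/PG cleared, CR4 = 0). The arithmetic implied by lines 305, 314 and 330-331 is easy to check in isolation; a small standalone sketch, using the architectural default SMBASE of 0x30000:

    #include <inttypes.h>
    #include <stdio.h>

    /* Derive the SMM entry parameters used by enter_smm() from a given SMBASE. */
    static void smm_entry_params(uint32_t smbase)
    {
            uint64_t save_area = (uint64_t)smbase + 0xfe00;   /* state image   */
            uint16_t cs_sel    = (smbase >> 4) & 0xffff;      /* real-mode sel */
            uint64_t entry     = (uint64_t)smbase + 0x8000;   /* CS.base + RIP */

            printf("SMBASE %#x: save area %#" PRIx64
                   ", CS sel %#x (base %#x), entry %#" PRIx64 "\n",
                   smbase, save_area, (unsigned int)cs_sel, smbase, entry);
    }

    int main(void)
    {
            smm_entry_params(0x30000);    /* architectural default SMBASE */
            smm_entry_params(0xa0000);    /* a typical relocated SMBASE   */
            return 0;
    }

For SMBASE 0x30000 this gives a save area at 0x3FE00 and an entry point at physical 0x38000, which is the architectural SMI behaviour KVM is emulating here.
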
383 static int rsm_load_seg_32(struct kvm_vcpu *vcpu, in rsm_load_seg_32() argument
393 kvm_set_segment(vcpu, &desc, n); in rsm_load_seg_32()
399 static int rsm_load_seg_64(struct kvm_vcpu *vcpu, in rsm_load_seg_64() argument
409 kvm_set_segment(vcpu, &desc, n); in rsm_load_seg_64()
414 static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu, in rsm_enter_protected_mode() argument
427 bad = kvm_set_cr3(vcpu, cr3); in rsm_enter_protected_mode()
436 bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); in rsm_enter_protected_mode()
440 bad = kvm_set_cr0(vcpu, cr0); in rsm_enter_protected_mode()
445 bad = kvm_set_cr4(vcpu, cr4); in rsm_enter_protected_mode()
449 bad = kvm_set_cr3(vcpu, cr3 | pcid); in rsm_enter_protected_mode()
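rsm_enter_protected_mode() has to re-enable CR4.PCIDE last: architecturally, setting PCIDE is only legal while CR3[11:0] is zero (and long mode is active), so CR3 is first loaded with the PCID field stripped, CR4 without PCIDE and then CR0 follow, and only afterwards are CR4.PCIDE and the saved PCID written back. A user-space model of that ordering; the constant is the architectural bit position, while the struct and checker are hypothetical stand-ins for the #GP real hardware would raise:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR4_PCIDE   (1ull << 17)

    struct toy_regs { uint64_t cr0, cr3, cr4; };

    /* Reject the write if it would violate the PCIDE/CR3 constraint. */
    static int toy_set_cr4(struct toy_regs *r, uint64_t val)
    {
            if ((val & X86_CR4_PCIDE) && (r->cr3 & 0xfff))
                    return -1;                      /* #GP on real hardware */
            r->cr4 = val;
            return 0;
    }

    static int toy_restore(struct toy_regs *r, uint64_t cr0, uint64_t cr3, uint64_t cr4)
    {
            uint64_t pcid = (cr4 & X86_CR4_PCIDE) ? (cr3 & 0xfff) : 0;

            r->cr3 = cr3 & ~0xfffull;               /* 1. CR3, PCID stripped      */
            if (toy_set_cr4(r, cr4 & ~X86_CR4_PCIDE))
                    return -1;                      /* 2. CR4 without PCIDE       */
            r->cr0 = cr0;                           /* 3. CR0 (paging/protection) */
            if ((cr4 & X86_CR4_PCIDE) && toy_set_cr4(r, cr4))
                    return -1;                      /* 4. CR4.PCIDE back on       */
            if (pcid)
                    r->cr3 |= pcid;                 /* 5. PCID written back       */
            return 0;
    }

    int main(void)
    {
            struct toy_regs r = { 0 };

            if (toy_restore(&r, 0x80050033, 0x12345007, X86_CR4_PCIDE))
                    return 1;
            printf("cr3=%#llx cr4=%#llx\n",
                   (unsigned long long)r.cr3, (unsigned long long)r.cr4);
            return 0;
    }
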
462 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_32() local
472 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_32()
474 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_32()
477 rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR); in rsm_load_state_32()
478 rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR); in rsm_load_state_32()
482 kvm_x86_call(set_gdt)(vcpu, &dt); in rsm_load_state_32()
486 kvm_x86_call(set_idt)(vcpu, &dt); in rsm_load_state_32()
488 rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES); in rsm_load_state_32()
489 rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS); in rsm_load_state_32()
490 rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS); in rsm_load_state_32()
492 rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS); in rsm_load_state_32()
493 rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS); in rsm_load_state_32()
494 rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS); in rsm_load_state_32()
496 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_32()
498 r = rsm_enter_protected_mode(vcpu, smstate->cr0, in rsm_load_state_32()
504 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in rsm_load_state_32()
514 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_64() local
524 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_64()
526 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_64()
529 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_64()
531 if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA)) in rsm_load_state_64()
534 rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR); in rsm_load_state_64()
538 kvm_x86_call(set_idt)(vcpu, &dt); in rsm_load_state_64()
540 rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR); in rsm_load_state_64()
544 kvm_x86_call(set_gdt)(vcpu, &dt); in rsm_load_state_64()
546 r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4); in rsm_load_state_64()
550 rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES); in rsm_load_state_64()
551 rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS); in rsm_load_state_64()
552 rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS); in rsm_load_state_64()
553 rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS); in rsm_load_state_64()
554 rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS); in rsm_load_state_64()
555 rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS); in rsm_load_state_64()
557 kvm_x86_call(set_interrupt_shadow)(vcpu, 0); in rsm_load_state_64()
566 struct kvm_vcpu *vcpu = ctxt->vcpu; in emulator_leave_smm() local
572 smbase = vcpu->arch.smbase; in emulator_leave_smm()
574 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram)); in emulator_leave_smm()
578 if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0) in emulator_leave_smm()
579 kvm_x86_call(set_nmi_mask)(vcpu, false); in emulator_leave_smm()
581 kvm_smm_changed(vcpu, false); in emulator_leave_smm()
585 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU in emulator_leave_smm()
589 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { in emulator_leave_smm()
594 cr4 = kvm_read_cr4(vcpu); in emulator_leave_smm()
596 kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE); in emulator_leave_smm()
602 kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS); in emulator_leave_smm()
607 cr0 = kvm_read_cr0(vcpu); in emulator_leave_smm()
609 kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); in emulator_leave_smm()
612 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { in emulator_leave_smm()
616 cr4 = kvm_read_cr4(vcpu); in emulator_leave_smm()
618 kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE); in emulator_leave_smm()
622 kvm_set_msr(vcpu, MSR_EFER, efer); in emulator_leave_smm()
633 if (kvm_x86_call(leave_smm)(vcpu, &smram)) in emulator_leave_smm()
637 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in emulator_leave_smm()
646 * flawed handling of RSM to L2 (see above), the vCPU may already be in emulator_leave_smm()
647 * in_guest_mode(). Force the vCPU out of guest mode before delivering in emulator_leave_smm()
651 if (ret != X86EMUL_CONTINUE && is_guest_mode(vcpu)) in emulator_leave_smm()
652 kvm_leave_nested(vcpu); in emulator_leave_smm()
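On the way out, emulator_leave_smm() reads the image back from SMBASE + 0xFE00, unmasks NMIs unless the SMI was taken inside an NMI handler (HF_SMM_INSIDE_NMI_MASK), and, before reloading the saved registers, walks the vCPU down to a neutral state from which any saved mode is reachable: CR4.PCIDE cleared before CR0.PG, a 32-bit CS so EFER.LMA can drop, then CR0.PG/PE, CR4.PAE and finally EFER itself. A toy-register sketch of that ordering; the struct and helper are hypothetical, only the bit constants and the sequence mirror the lines above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_PE      (1ull << 0)
    #define X86_CR0_PG      (1ull << 31)
    #define X86_CR4_PAE     (1ull << 5)
    #define X86_CR4_PCIDE   (1ull << 17)

    struct toy_vcpu {
            uint64_t cr0, cr4, efer;
            bool cs_long;           /* CS.L                                   */
            bool smm_inside_nmi;    /* analogue of HF_SMM_INSIDE_NMI_MASK     */
            bool nmi_masked;
    };

    /* Ordered descent to real mode before the SMRAM image is reloaded. */
    static void leave_smm_prepare(struct toy_vcpu *v, bool guest_has_long_mode)
    {
            /* NMIs stay masked only if the SMI interrupted an NMI handler. */
            if (!v->smm_inside_nmi)
                    v->nmi_masked = false;

            if (guest_has_long_mode) {
                    v->cr4 &= ~X86_CR4_PCIDE;   /* PCIDE must fall before CR0.PG  */
                    v->cs_long = false;         /* 32-bit CS so EFER.LMA can drop */
            }

            v->cr0 &= ~(X86_CR0_PG | X86_CR0_PE);   /* back to real mode          */

            if (guest_has_long_mode) {
                    v->cr4 &= ~X86_CR4_PAE;     /* PAE off before EFER.LME        */
                    v->efer = 0;                /* fully out of long mode         */
            }
    }

    int main(void)
    {
            struct toy_vcpu v = {
                    .cr0 = X86_CR0_PG | X86_CR0_PE,
                    .cr4 = X86_CR4_PAE | X86_CR4_PCIDE,
                    .efer = 0x500,              /* EFER.LME | EFER.LMA            */
                    .cs_long = true,
                    .nmi_masked = true,
            };

            leave_smm_prepare(&v, true);
            printf("cr0=%#llx cr4=%#llx efer=%#llx nmi_masked=%d\n",
                   (unsigned long long)v.cr0, (unsigned long long)v.cr4,
                   (unsigned long long)v.efer, v.nmi_masked);
            return 0;
    }

Lines 651-652 then cover the failure path the comment at 646-647 describes: if the saved image cannot be loaded, the vCPU is forced out of nested guest mode before the error is reported.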