Lines Matching +full:es +full:- +full:enable

1 /* SPDX-License-Identifier: GPL-2.0 */
13 ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)
16 ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
41 CHECK_SMRAM32_OFFSET(es, 0xFF84); in check_smram_offsets()
61 CHECK_SMRAM64_OFFSET(es, 0xFE00); in check_smram_offsets()
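The ASSERT_STRUCT_OFFSET()/CHECK_SMRAM*_OFFSET() lines above pin every field of kvm_smram_state_32 and kvm_smram_state_64 to its architectural offset in the SMRAM state-save area, expressed relative to SMBASE + 0xFE00. A minimal stand-alone sketch of the same kind of compile-time layout check, assuming a C11 toolchain (the struct, field, and macro name below are illustrative, not KVM's definitions):

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Hypothetical stand-alone equivalent of ASSERT_STRUCT_OFFSET():
	 * break the build if a field does not sit at its architectural
	 * offset within the state-save image, which starts at SMBASE + 0xFE00.
	 */
	#define CHECK_SMRAM_OFFSET(type, field, offset) \
		_Static_assert(offsetof(type, field) == ((offset) - 0xFE00), \
			       #type "." #field " is misplaced")

	struct smram32_sketch {				/* illustrative layout only */
		uint8_t  reserved[0xFF84 - 0xFE00];
		uint32_t es;				/* ES save slot, SMBASE + 0xFF84 */
	};

	CHECK_SMRAM_OFFSET(struct smram32_sketch, es, 0xFF84);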
114 trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm); in kvm_smm_changed()
117 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_smm_changed()
119 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK); in kvm_smm_changed()
129 vcpu->arch.pdptrs_from_userspace = false; in kvm_smm_changed()
138 vcpu->arch.smi_pending = true; in process_smi()
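kvm_smm_changed() only toggles bookkeeping: it sets or clears HF_SMM_MASK (and HF_SMM_INSIDE_NMI_MASK) in vcpu->arch.hflags, and process_smi() merely latches smi_pending. The rest of KVM reads that flag back through an is_smm()-style helper; a minimal sketch of the pattern, assuming it sits next to the code above with the usual KVM x86 headers available:

	/* Sketch: "in SMM" is tracked purely as a vCPU flag set/cleared above. */
	static inline bool vcpu_in_smm_sketch(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.hflags & HF_SMM_MASK;
	}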
145 flags |= seg->g << 23; in enter_smm_get_segment_flags()
146 flags |= seg->db << 22; in enter_smm_get_segment_flags()
147 flags |= seg->l << 21; in enter_smm_get_segment_flags()
148 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
149 flags |= seg->present << 15; in enter_smm_get_segment_flags()
150 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
151 flags |= seg->s << 12; in enter_smm_get_segment_flags()
152 flags |= seg->type << 8; in enter_smm_get_segment_flags()
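enter_smm_get_segment_flags() packs the hidden segment attributes (type, S, DPL, P, AVL, L, D/B, G) into bits 8-23 of a 32-bit word, the same positions they occupy in the upper dword of an x86 segment descriptor, and rsm_set_desc_flags() further down is its exact inverse. A small self-contained round-trip sketch of that encoding (the struct below is a simplified stand-in for struct kvm_segment):

	#include <assert.h>
	#include <stdint.h>

	struct seg_sketch {		/* simplified stand-in for struct kvm_segment */
		uint8_t type, s, dpl, present, avl, l, db, g;
	};

	static uint32_t pack_flags(const struct seg_sketch *s)	/* mirrors enter_smm_get_segment_flags() */
	{
		return (s->g << 23) | (s->db << 22) | (s->l << 21) | (s->avl << 20) |
		       (s->present << 15) | (s->dpl << 13) | (s->s << 12) | (s->type << 8);
	}

	static void unpack_flags(struct seg_sketch *s, uint32_t f)	/* mirrors rsm_set_desc_flags() */
	{
		s->g = (f >> 23) & 1;
		s->db = (f >> 22) & 1;
		s->l = (f >> 21) & 1;
		s->avl = (f >> 20) & 1;
		s->present = (f >> 15) & 1;
		s->dpl = (f >> 13) & 3;
		s->s = (f >> 12) & 1;
		s->type = (f >> 8) & 15;
	}

	int main(void)
	{
		struct seg_sketch in = { .type = 0xb, .s = 1, .present = 1, .db = 1, .g = 1 };
		struct seg_sketch out;

		unpack_flags(&out, pack_flags(&in));	/* the round trip is lossless */
		assert(out.type == in.type && out.dpl == in.dpl && out.g == in.g);
		return 0;
	}

The 64-bit save path keeps only the upper 16 bits of this value (flags >> 8) in the SMRAM attributes field, which is why rsm_load_seg_64() shifts the attributes back up by 8 before decoding them.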
164 state->base = seg.base; in enter_smm_save_seg_32()
165 state->limit = seg.limit; in enter_smm_save_seg_32()
166 state->flags = enter_smm_get_segment_flags(&seg); in enter_smm_save_seg_32()
177 state->selector = seg.selector; in enter_smm_save_seg_64()
178 state->attributes = enter_smm_get_segment_flags(&seg) >> 8; in enter_smm_save_seg_64()
179 state->limit = seg.limit; in enter_smm_save_seg_64()
180 state->base = seg.base; in enter_smm_save_seg_64()
190 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_32()
191 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_32()
192 smram->eflags = kvm_get_rflags(vcpu); in enter_smm_save_state_32()
193 smram->eip = kvm_rip_read(vcpu); in enter_smm_save_state_32()
196 smram->gprs[i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_32()
198 smram->dr6 = (u32)vcpu->arch.dr6; in enter_smm_save_state_32()
199 smram->dr7 = (u32)vcpu->arch.dr7; in enter_smm_save_state_32()
201 enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR); in enter_smm_save_state_32()
202 enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR); in enter_smm_save_state_32()
205 smram->gdtr.base = dt.address; in enter_smm_save_state_32()
206 smram->gdtr.limit = dt.size; in enter_smm_save_state_32()
209 smram->idtr.base = dt.address; in enter_smm_save_state_32()
210 smram->idtr.limit = dt.size; in enter_smm_save_state_32()
212 enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES); in enter_smm_save_state_32()
213 enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS); in enter_smm_save_state_32()
214 enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS); in enter_smm_save_state_32()
216 enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS); in enter_smm_save_state_32()
217 enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS); in enter_smm_save_state_32()
218 enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS); in enter_smm_save_state_32()
220 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_32()
221 smram->smm_revision = 0x00020000; in enter_smm_save_state_32()
222 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_32()
224 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_32()
235 smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i); in enter_smm_save_state_64()
237 smram->rip = kvm_rip_read(vcpu); in enter_smm_save_state_64()
238 smram->rflags = kvm_get_rflags(vcpu); in enter_smm_save_state_64()
240 smram->dr6 = vcpu->arch.dr6; in enter_smm_save_state_64()
241 smram->dr7 = vcpu->arch.dr7; in enter_smm_save_state_64()
243 smram->cr0 = kvm_read_cr0(vcpu); in enter_smm_save_state_64()
244 smram->cr3 = kvm_read_cr3(vcpu); in enter_smm_save_state_64()
245 smram->cr4 = kvm_read_cr4(vcpu); in enter_smm_save_state_64()
247 smram->smbase = vcpu->arch.smbase; in enter_smm_save_state_64()
248 smram->smm_revison = 0x00020064; in enter_smm_save_state_64()
250 smram->efer = vcpu->arch.efer; in enter_smm_save_state_64()
252 enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR); in enter_smm_save_state_64()
255 smram->idtr.limit = dt.size; in enter_smm_save_state_64()
256 smram->idtr.base = dt.address; in enter_smm_save_state_64()
258 enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR); in enter_smm_save_state_64()
261 smram->gdtr.limit = dt.size; in enter_smm_save_state_64()
262 smram->gdtr.base = dt.address; in enter_smm_save_state_64()
264 enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES); in enter_smm_save_state_64()
265 enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS); in enter_smm_save_state_64()
266 enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS); in enter_smm_save_state_64()
267 enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS); in enter_smm_save_state_64()
268 enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS); in enter_smm_save_state_64()
269 enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS); in enter_smm_save_state_64()
271 smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu); in enter_smm_save_state_64()
274 kvm_msr_read(vcpu, MSR_KVM_INTERNAL_GUEST_SSP, &smram->ssp)) in enter_smm_save_state_64()
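Unlike the 32-bit image, which stores the GPRs in natural order (smram->gprs[i] above), the 64-bit image is filled with smram->gprs[15 - i], so gprs[0] holds R15 and gprs[15] holds RAX; rsm_load_state_64() further down undoes this with the same 15 - i expression. A tiny self-contained sketch of the mapping, assuming KVM's register indices (VCPU_REGS_RAX == 0 ... VCPU_REGS_R15 == 15) and using dummy values in place of kvm_register_read_raw():

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t regs[16], gprs[16];	/* regs[] indexed like VCPU_REGS_RAX..R15 */
		int i;

		for (i = 0; i < 16; i++)
			regs[i] = i;		/* dummy values: RAX=0 ... R15=15 */

		for (i = 0; i < 16; i++)	/* the save loop's index reversal */
			gprs[15 - i] = regs[i];

		/* gprs[0] now holds R15's value, gprs[15] holds RAX's. */
		printf("gprs[0]=%llu gprs[15]=%llu\n",
		       (unsigned long long)gprs[0], (unsigned long long)gprs[15]);
		return 0;
	}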
298 * Give enter_smm() a chance to make ISA-specific changes to the vCPU in enter_smm()
300 * SMM state-save area. in enter_smm()
310 if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram))) in enter_smm()
314 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
323 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
335 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
336 cs.base = vcpu->arch.smbase; in enter_smm()
366 vcpu->arch.cpuid_dynamic_bits_dirty = true; in enter_smm()
370 kvm_vm_dead(vcpu->kvm); in enter_smm()
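On SMM entry, CS is set up real-mode style from SMBASE: the selector is SMBASE >> 4 (masked to 16 bits) and the base is SMBASE itself, so selector * 16 == base just as in real mode. Architecturally the SMI handler then begins executing at SMBASE + 0x8000 (that RIP write is not among the matched lines here). A worked example with the architectural default SMBASE of 0x30000:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t smbase = 0x30000;			/* default SMBASE after reset */
		uint16_t cs_sel = (smbase >> 4) & 0xffff;	/* 0x3000, as in enter_smm() */
		uint32_t cs_base = smbase;			/* 0x30000 */

		printf("CS=%#x base=%#x entry=%#x\n",
		       (unsigned)cs_sel, (unsigned)cs_base, (unsigned)(smbase + 0x8000));
		return 0;	/* prints CS=0x3000 base=0x30000 entry=0x38000 */
	}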
375 desc->g = (flags >> 23) & 1; in rsm_set_desc_flags()
376 desc->db = (flags >> 22) & 1; in rsm_set_desc_flags()
377 desc->l = (flags >> 21) & 1; in rsm_set_desc_flags()
378 desc->avl = (flags >> 20) & 1; in rsm_set_desc_flags()
379 desc->present = (flags >> 15) & 1; in rsm_set_desc_flags()
380 desc->dpl = (flags >> 13) & 3; in rsm_set_desc_flags()
381 desc->s = (flags >> 12) & 1; in rsm_set_desc_flags()
382 desc->type = (flags >> 8) & 15; in rsm_set_desc_flags()
384 desc->unusable = !desc->present; in rsm_set_desc_flags()
385 desc->padding = 0; in rsm_set_desc_flags()
395 desc.base = state->base; in rsm_load_seg_32()
396 desc.limit = state->limit; in rsm_load_seg_32()
397 rsm_set_desc_flags(&desc, state->flags); in rsm_load_seg_32()
410 desc.selector = state->selector; in rsm_load_seg_64()
411 rsm_set_desc_flags(&desc, state->attributes << 8); in rsm_load_seg_64()
412 desc.limit = state->limit; in rsm_load_seg_64()
413 desc.base = state->base; in rsm_load_seg_64()
437 * First enable PAE, long mode needs it before CR0.PG = 1 is set. in rsm_enter_protected_mode()
438 * Then enable protected mode. However, PCID cannot be enabled in rsm_enter_protected_mode()
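The comment excerpted above describes a deliberate ordering when restoring control registers on RSM: CR4.PAE must be in place before CR0.PG is set again (long mode needs it), protected mode comes after that, and CR4.PCIDE is deferred until the very end. A rough sketch of that ordering, assuming KVM's kvm_set_cr3()/kvm_set_cr4()/kvm_set_cr0() helpers and eliding the per-step error handling the real function performs:

	/* Sketch only: the order of operations, not the exact function body. */
	u64 pcid = (cr4 & X86_CR4_PCIDE) ? (cr3 & 0xfff) : 0;

	kvm_set_cr3(vcpu, cr3 & ~0xfffull);	/* CR3 without the PCID bits	*/
	kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);/* PAE first, before CR0.PG	*/
	kvm_set_cr0(vcpu, cr0);			/* then protected mode/paging	*/
	if (cr4 & X86_CR4_PCIDE) {
		kvm_set_cr4(vcpu, cr4);		/* PCIDE only at the end	*/
		kvm_set_cr3(vcpu, cr3 | pcid);	/* and finally the PCID itself	*/
	}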
467 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_32()
471 ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED; in rsm_load_state_32()
472 ctxt->_eip = smstate->eip; in rsm_load_state_32()
475 *reg_write(ctxt, i) = smstate->gprs[i]; in rsm_load_state_32()
477 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_32()
479 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_32()
482 rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR); in rsm_load_state_32()
483 rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR); in rsm_load_state_32()
485 dt.address = smstate->gdtr.base; in rsm_load_state_32()
486 dt.size = smstate->gdtr.limit; in rsm_load_state_32()
489 dt.address = smstate->idtr.base; in rsm_load_state_32()
490 dt.size = smstate->idtr.limit; in rsm_load_state_32()
493 rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES); in rsm_load_state_32()
494 rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS); in rsm_load_state_32()
495 rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS); in rsm_load_state_32()
497 rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS); in rsm_load_state_32()
498 rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS); in rsm_load_state_32()
499 rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS); in rsm_load_state_32()
501 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_32()
503 r = rsm_enter_protected_mode(vcpu, smstate->cr0, in rsm_load_state_32()
504 smstate->cr3, smstate->cr4); in rsm_load_state_32()
510 ctxt->interruptibility = (u8)smstate->int_shadow; in rsm_load_state_32()
519 struct kvm_vcpu *vcpu = ctxt->vcpu; in rsm_load_state_64()
524 *reg_write(ctxt, i) = smstate->gprs[15 - i]; in rsm_load_state_64()
526 ctxt->_eip = smstate->rip; in rsm_load_state_64()
527 ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED; in rsm_load_state_64()
529 if (kvm_set_dr(vcpu, 6, smstate->dr6)) in rsm_load_state_64()
531 if (kvm_set_dr(vcpu, 7, smstate->dr7)) in rsm_load_state_64()
534 vcpu->arch.smbase = smstate->smbase; in rsm_load_state_64()
536 if (__kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA)) in rsm_load_state_64()
539 rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR); in rsm_load_state_64()
541 dt.size = smstate->idtr.limit; in rsm_load_state_64()
542 dt.address = smstate->idtr.base; in rsm_load_state_64()
545 rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR); in rsm_load_state_64()
547 dt.size = smstate->gdtr.limit; in rsm_load_state_64()
548 dt.address = smstate->gdtr.base; in rsm_load_state_64()
551 r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4); in rsm_load_state_64()
555 rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES); in rsm_load_state_64()
556 rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS); in rsm_load_state_64()
557 rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS); in rsm_load_state_64()
558 rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS); in rsm_load_state_64()
559 rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS); in rsm_load_state_64()
560 rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS); in rsm_load_state_64()
563 ctxt->interruptibility = (u8)smstate->int_shadow; in rsm_load_state_64()
566 kvm_msr_write(vcpu, MSR_KVM_INTERNAL_GUEST_SSP, smstate->ssp)) in rsm_load_state_64()
575 struct kvm_vcpu *vcpu = ctxt->vcpu; in emulator_leave_smm()
581 smbase = vcpu->arch.smbase; in emulator_leave_smm()
587 if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0) in emulator_leave_smm()
607 /* A 32-bit code segment is required to clear EFER.LMA. */ in emulator_leave_smm()
615 /* For the 64-bit case, this will clear EFER.LMA. */ in emulator_leave_smm()
629 /* And finally go back to 32-bit mode. */ in emulator_leave_smm()
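These comments outline the order in which emulator_leave_smm() climbs down from 64-bit mode before reloading the saved state: install a 32-bit code segment (required before EFER.LMA can be cleared), drop CR0.PG and CR0.PE (which clears EFER.LMA), and only then clear CR4.PAE and zero EFER to leave long mode entirely. A condensed sketch of that sequence, assuming the usual KVM setters and omitting the long-mode/CPUID guards of the real function:

	/* Sketch only: order of operations, not the exact function body. */
	struct kvm_segment cs32 = { .type = 0xb, .s = 1, .present = 1, .g = 1 };

	kvm_set_segment(vcpu, &cs32, VCPU_SREG_CS);		/* 32-bit CS first */
	kvm_set_cr0(vcpu, kvm_read_cr0(vcpu) & ~(X86_CR0_PG | X86_CR0_PE));
	kvm_set_cr4(vcpu, kvm_read_cr4(vcpu) & ~X86_CR4_PAE);	/* PAE off ...	   */
	__kvm_emulate_msr_write(vcpu, MSR_EFER, 0);		/* ... then EFER=0 */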
638 * piggybacks the nested VM-Enter flows (which is wrong for many other in emulator_leave_smm()
657 * the shutdown, so that L1 enters shutdown instead of seeing a VM-Exit in emulator_leave_smm()