vmx.c @ 8b50b92f9f1a819cba290e24064337004c00ee36 (old) vs. vmx.c @ 8888cdd0996c2d51cd417f9a60a282c034f3fa28 (new)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.

--- 381 unchanged lines hidden ---

390static DEFINE_PER_CPU(struct vmcs *, vmxarea);
391DEFINE_PER_CPU(struct vmcs *, current_vmcs);
392/*
393 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
394 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
395 */
396static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
397
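
The per-CPU list declared above (loaded_vmcss_on_cpu) is what lets KVM VMCLEAR every VMCS still active on a CPU that is going offline or kexec'ing (see the crash_vmclear hook near the end of this diff). A minimal sketch of that pattern follows; the demo_ names are hypothetical, struct vmcs and vmcs_clear() come from this file's own headers, and the real code also handles the current-VMCS pointer and runs with interrupts disabled, which is omitted here.

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct demo_loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	struct list_head loaded_vmcss_on_cpu_link;
};

static DEFINE_PER_CPU(struct list_head, demo_loaded_vmcss_on_cpu);

/* One-time setup per possible CPU, cf. the for_each_possible_cpu() loop
 * in vmx_init() at the end of this file. */
static void demo_init_cpu(int cpu)
{
	INIT_LIST_HEAD(&per_cpu(demo_loaded_vmcss_on_cpu, cpu));
}

/* Runs on the CPU that is going down (or just before kexec): VMCLEAR
 * everything still loaded there so no VMCS stays active on a dead CPU. */
static void demo_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct demo_loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(demo_loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link) {
		vmcs_clear(v->vmcs);		/* the VMCLEAR instruction wrapper */
		list_del(&v->loaded_vmcss_on_cpu_link);
		v->cpu = -1;
	}
}
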
398/*
399 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
400 * can find which vCPU should be woken up.
401 */
402static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
403static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
404
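
blocked_vcpu_on_cpu and its spinlock back the comment above: a vCPU that blocks with posted interrupts in use parks itself on the list of the CPU it blocked on, and wakeup_handler() later walks that list. A hedged sketch of the enqueue/dequeue discipline, with hypothetical demo_ names; the real work happens in pi_pre_block() and __pi_post_block() further down.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, ->blocked_vcpu_list */
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(struct list_head, demo_blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, demo_blocked_vcpu_on_cpu_lock);

/* Enqueue, called with IRQs disabled so the wakeup vector on this CPU
 * cannot observe a half-linked entry (cf. local_irq_disable() in
 * pi_pre_block() below). */
static void demo_block_on(struct kvm_vcpu *vcpu, int cpu)
{
	spin_lock(&per_cpu(demo_blocked_vcpu_on_cpu_lock, cpu));
	list_add_tail(&vcpu->blocked_vcpu_list,
		      &per_cpu(demo_blocked_vcpu_on_cpu, cpu));
	spin_unlock(&per_cpu(demo_blocked_vcpu_on_cpu_lock, cpu));
}

/* Dequeue on unblock, mirroring __pi_post_block(). */
static void demo_unblock(struct kvm_vcpu *vcpu, int cpu)
{
	spin_lock(&per_cpu(demo_blocked_vcpu_on_cpu_lock, cpu));
	list_del(&vcpu->blocked_vcpu_list);
	spin_unlock(&per_cpu(demo_blocked_vcpu_on_cpu_lock, cpu));
}
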
405static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
406static DEFINE_SPINLOCK(vmx_vpid_lock);
407
408struct vmcs_config vmcs_config;
409struct vmx_capability vmx_capability;
410
411#define VMX_SEGMENT_FIELD(seg) \
412 [VCPU_SREG_##seg] = { \

--- 838 unchanged lines hidden ---

1251 preempt_disable();
1252 if (vmx->guest_state_loaded)
1253 wrmsrl(MSR_KERNEL_GS_BASE, data);
1254 preempt_enable();
1255 vmx->msr_guest_kernel_gs_base = data;
1256}
1257#endif
1258
398static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
399static DEFINE_SPINLOCK(vmx_vpid_lock);
400
401struct vmcs_config vmcs_config;
402struct vmx_capability vmx_capability;
403
404#define VMX_SEGMENT_FIELD(seg) \
405 [VCPU_SREG_##seg] = { \

--- 838 unchanged lines hidden ---

1244 preempt_disable();
1245 if (vmx->guest_state_loaded)
1246 wrmsrl(MSR_KERNEL_GS_BASE, data);
1247 preempt_enable();
1248 vmx->msr_guest_kernel_gs_base = data;
1249}
1250#endif
1251
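
The removed functions that follow (vmx_vcpu_pi_load() and friends) all operate on the VT-d posted-interrupt descriptor. The sketch below recalls its layout so the pir/on/sn/nv/ndst accesses are easier to follow; field names match the accessors used in this file, while the exact bit positions are quoted from the VT-x posted-interrupt format and are illustrative here rather than taken from this diff.

#include <linux/types.h>

/*
 * Posted-interrupt descriptor (demo copy).  The real definition is
 * 64-byte aligned; accessors like pi_test_on()/pi_set_sn() are simple
 * bit operations on the control word.
 */
struct demo_pi_desc {
	u32 pir[8];		/* bits 255:0   - posted interrupt requests, one per vector */
	union {
		struct {
			u16 on     : 1,	/* bit 256      - outstanding notification */
			    sn     : 1,	/* bit 257      - suppress notification */
			    rsvd_1 : 14;
			u8  nv;		/* bits 279:272 - notification vector */
			u8  rsvd_2;
			u32 ndst;	/* bits 319:288 - notification destination (APIC ID) */
		};
		u64 control;	/* the 64-bit word updated via cmpxchg64() below */
	};
	u32 rsvd[6];
};
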
1259static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
1260{
1261 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
1262 struct pi_desc old, new;
1263 unsigned int dest;
1264
1265 /*
1266 * In case of hot-plug or hot-unplug, we may have to undo
1267 * vmx_vcpu_pi_put even if there is no assigned device. And we
1268 * always keep PI.NDST up to date for simplicity: it makes the
1269 * code easier, and CPU migration is not a fast path.
1270 */
1271 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
1272 return;
1273
1274 /*
1275 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
1276 * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
1277 * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
1278 * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up
1279 * correctly.
1280 */
1281 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
1282 pi_clear_sn(pi_desc);
1283 goto after_clear_sn;
1284 }
1285
1286 /* The full case. */
1287 do {
1288 old.control = new.control = pi_desc->control;
1289
1290 dest = cpu_physical_id(cpu);
1291
1292 if (x2apic_enabled())
1293 new.ndst = dest;
1294 else
1295 new.ndst = (dest << 8) & 0xFF00;
1296
1297 new.sn = 0;
1298 } while (cmpxchg64(&pi_desc->control, old.control,
1299 new.control) != old.control);
1300
1301after_clear_sn:
1302
1303 /*
1304 * Clear SN before reading the bitmap. The VT-d firmware
1305 * writes the bitmap and reads SN atomically (5.2.3 in the
1306 * spec), so it doesn't really have a memory barrier that
1307 * pairs with this, but we cannot do that and we need one.
1308 */
1309 smp_mb__after_atomic();
1310
1311 if (!pi_is_pir_empty(pi_desc))
1312 pi_set_on(pi_desc);
1313}
1314
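
vmx_vcpu_pi_load() above, like __pi_post_block() and pi_pre_block() later in this diff, updates the descriptor with the same lock-free pattern: snapshot control, build the new value, and retry the 64-bit compare-and-swap until no concurrent writer (another CPU or the IOMMU hardware) slipped in between. A self-contained user-space model of that retry loop, using C11 atomics in place of the kernel's cmpxchg64(); the bit packing and the 0xf2 vector value are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified 64-bit "control" word: bit 0 = ON, bit 1 = SN,
 * bits 23:16 = notification vector (NV), bits 63:32 = destination (NDST). */
#define DEMO_SN		(1ull << 1)

static _Atomic uint64_t demo_control;

/* Equivalent of:
 *	do {
 *		old.control = new.control = pi_desc->control;
 *		...
 *	} while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control);
 */
static void demo_set_ndst_and_clear_sn(uint32_t dest, uint8_t vector)
{
	uint64_t old = atomic_load(&demo_control);
	uint64_t new;

	do {
		new = old;
		new &= ~(0xffffffffull << 32);	/* clear NDST */
		new |= (uint64_t)dest << 32;	/* NDST = target pCPU */
		new &= ~(0xffull << 16);	/* clear NV */
		new |= (uint64_t)vector << 16;	/* NV = notification vector */
		new &= ~DEMO_SN;		/* stop suppressing notifications */
		/* On failure the CAS reloads 'old' with the current value. */
	} while (!atomic_compare_exchange_weak(&demo_control, &old, new));
}

int main(void)
{
	demo_set_ndst_and_clear_sn(3, 0xf2);	/* 0xf2: illustrative vector value */
	return (atomic_load(&demo_control) & DEMO_SN) ? 1 : 0;
}
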
1315void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1316 struct loaded_vmcs *buddy)
1317{
1318 struct vcpu_vmx *vmx = to_vmx(vcpu);
1319 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1320 struct vmcs *prev;
1321
1322 if (!already_loaded) {

--- 67 unchanged lines hidden ---

1390
1391 vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1392
1393 vmx_vcpu_pi_load(vcpu, cpu);
1394
1395 vmx->host_debugctlmsr = get_debugctlmsr();
1396}
1397
1252void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1253 struct loaded_vmcs *buddy)
1254{
1255 struct vcpu_vmx *vmx = to_vmx(vcpu);
1256 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1257 struct vmcs *prev;
1258
1259 if (!already_loaded) {

--- 67 unchanged lines hidden (view full) ---

1327
1328 vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1329
1330 vmx_vcpu_pi_load(vcpu, cpu);
1331
1332 vmx->host_debugctlmsr = get_debugctlmsr();
1333}
1334
1398static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
1399{
1400 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
1401
1402 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
1403 !irq_remapping_cap(IRQ_POSTING_CAP) ||
1404 !kvm_vcpu_apicv_active(vcpu))
1405 return;
1406
1407 /* Set SN when the vCPU is preempted */
1408 if (vcpu->preempted)
1409 pi_set_sn(pi_desc);
1410}
1411
1412static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1413{
1414 vmx_vcpu_pi_put(vcpu);
1415
1416 vmx_prepare_switch_to_host(to_vmx(vcpu));
1417}
1418
1419static bool emulation_required(struct kvm_vcpu *vcpu)

--- 3983 unchanged lines hidden ---

5403
5404 if (vmx->ple_window != old) {
5405 vmx->ple_window_dirty = true;
5406 trace_kvm_ple_window_update(vcpu->vcpu_id,
5407 vmx->ple_window, old);
5408 }
5409}
5410
1335static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1336{
1337 vmx_vcpu_pi_put(vcpu);
1338
1339 vmx_prepare_switch_to_host(to_vmx(vcpu));
1340}
1341
1342static bool emulation_required(struct kvm_vcpu *vcpu)

--- 3983 unchanged lines hidden (view full) ---

5326
5327 if (vmx->ple_window != old) {
5328 vmx->ple_window_dirty = true;
5329 trace_kvm_ple_window_update(vcpu->vcpu_id,
5330 vmx->ple_window, old);
5331 }
5332}
5333
5411/*
5412 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
5413 */
5414static void wakeup_handler(void)
5415{
5416 struct kvm_vcpu *vcpu;
5417 int cpu = smp_processor_id();
5418
5419 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
5420 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
5421 blocked_vcpu_list) {
5422 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
5423
5424 if (pi_test_on(pi_desc) == 1)
5425 kvm_vcpu_kick(vcpu);
5426 }
5427 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
5428}
5429
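
wakeup_handler() above runs as the CPU's POSTED_INTERRUPT_WAKEUP_VECTOR handler: when a device posts an interrupt for a blocked vCPU, the IOMMU signals that vector on the pCPU recorded in NDST, and the handler kicks every listed vCPU whose ON bit is set. It is installed near the end of this diff through kvm_set_posted_intr_wakeup_handler(); below is a hedged sketch of the registration pattern, with the header location and NULL-unregister behaviour quoted from memory rather than from this diff.

#include <asm/irq.h>	/* declares kvm_set_posted_intr_wakeup_handler() on x86 */

/* Hypothetical handler: runs in IRQ context on the CPU that received
 * POSTED_INTERRUPT_WAKEUP_VECTOR, so it may only take IRQ-safe locks such
 * as the per-CPU blocked_vcpu_on_cpu_lock above, and must never sleep. */
static void demo_wakeup_handler(void)
{
	/* walk this CPU's blocked-vCPU list and kick vCPUs whose ON bit is set */
}

static void demo_register_wakeup(void)
{
	kvm_set_posted_intr_wakeup_handler(demo_wakeup_handler);
}

static void demo_unregister_wakeup(void)
{
	/* Passing NULL makes the core fall back to its dummy handler. */
	kvm_set_posted_intr_wakeup_handler(NULL);
}
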
5430static void vmx_enable_tdp(void)
5431{
5432 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
5433 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
5434 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
5435 0ull, VMX_EPT_EXECUTABLE_MASK,
5436 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
5437 VMX_EPT_RWX_MASK, 0ull);

--- 840 unchanged lines hidden ---

6278 }
6279 } else {
6280 max_irr = kvm_lapic_find_highest_irr(vcpu);
6281 }
6282 vmx_hwapic_irr_update(vcpu, max_irr);
6283 return max_irr;
6284}
6285
5334static void vmx_enable_tdp(void)
5335{
5336 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
5337 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
5338 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
5339 0ull, VMX_EPT_EXECUTABLE_MASK,
5340 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
5341 VMX_EPT_RWX_MASK, 0ull);

--- 840 unchanged lines hidden (view full) ---

6182 }
6183 } else {
6184 max_irr = kvm_lapic_find_highest_irr(vcpu);
6185 }
6186 vmx_hwapic_irr_update(vcpu, max_irr);
6187 return max_irr;
6188}
6189
6286static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
6287{
6288 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
6289
6290 return pi_test_on(pi_desc) ||
6291 (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
6292}
6293
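
vmx_dy_apicv_has_pending_interrupt() above (pi_has_pending_interrupt() on the new side) treats a vCPU as having pending work if ON is set, or if notifications are suppressed (SN set, e.g. after vmx_vcpu_pi_put() on preemption) while the 256-bit PIR still has bits set. A small self-contained model of that predicate; the real pi_is_pir_empty() scans the pir[] words the same way, though it stores them as eight u32s rather than the four u64s used here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 256 posted-interrupt request bits, one per vector (4 x u64 here). */
struct demo_pir {
	uint64_t pir[4];
};

static bool demo_pir_empty(const struct demo_pir *p)
{
	for (int i = 0; i < 4; i++)
		if (p->pir[i])
			return false;
	return true;
}

/* Mirrors the predicate above: ON set, or "suppressed but work queued". */
static bool demo_has_pending(bool on, bool sn, const struct demo_pir *p)
{
	return on || (sn && !demo_pir_empty(p));
}

int main(void)
{
	struct demo_pir p = { .pir = { 0, 0, 1ull << 33, 0 } };	/* vector 161 posted */

	printf("%d\n", demo_has_pending(false, true, &p));	/* prints 1 */
	return 0;
}
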
6294static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6295{
6296 if (!kvm_vcpu_apicv_active(vcpu))
6297 return;
6298
6299 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6300 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6301 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);

--- 1125 unchanged lines hidden ---

7427
7428static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
7429 struct kvm_memory_slot *memslot,
7430 gfn_t offset, unsigned long mask)
7431{
7432 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
7433}
7434
6190static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6191{
6192 if (!kvm_vcpu_apicv_active(vcpu))
6193 return;
6194
6195 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6196 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6197 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);

--- 1125 unchanged lines hidden (view full) ---

7323
7324static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
7325 struct kvm_memory_slot *memslot,
7326 gfn_t offset, unsigned long mask)
7327{
7328 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
7329}
7330
7435static void __pi_post_block(struct kvm_vcpu *vcpu)
7436{
7437 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7438 struct pi_desc old, new;
7439 unsigned int dest;
7440
7441 do {
7442 old.control = new.control = pi_desc->control;
7443 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
7444 "Wakeup handler not enabled while the VCPU is blocked\n");
7445
7446 dest = cpu_physical_id(vcpu->cpu);
7447
7448 if (x2apic_enabled())
7449 new.ndst = dest;
7450 else
7451 new.ndst = (dest << 8) & 0xFF00;
7452
7453 /* set 'NV' to 'notification vector' */
7454 new.nv = POSTED_INTR_VECTOR;
7455 } while (cmpxchg64(&pi_desc->control, old.control,
7456 new.control) != old.control);
7457
7458 if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
7459 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7460 list_del(&vcpu->blocked_vcpu_list);
7461 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7462 vcpu->pre_pcpu = -1;
7463 }
7464}
7465
7466/*
7467 * This routine does the following for a vCPU that is about to be
7468 * blocked, if VT-d PI is enabled:
7469 * - Add the vCPU to the wakeup list, so when an interrupt arrives
7470 * we can find the right vCPU to wake up.
7471 * - Change the posted-interrupt descriptor as below:
7472 * 'NDST' <-- vcpu->pre_pcpu
7473 * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR
7474 * - If 'ON' is set during this process, at least one interrupt has
7475 * already been posted for this vCPU and it cannot block; in that
7476 * case return 1, otherwise return 0.
7477 *
7478 */
7479static int pi_pre_block(struct kvm_vcpu *vcpu)
7480{
7481 unsigned int dest;
7482 struct pi_desc old, new;
7483 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7484
7485 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
7486 !irq_remapping_cap(IRQ_POSTING_CAP) ||
7487 !kvm_vcpu_apicv_active(vcpu))
7488 return 0;
7489
7490 WARN_ON(irqs_disabled());
7491 local_irq_disable();
7492 if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
7493 vcpu->pre_pcpu = vcpu->cpu;
7494 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7495 list_add_tail(&vcpu->blocked_vcpu_list,
7496 &per_cpu(blocked_vcpu_on_cpu,
7497 vcpu->pre_pcpu));
7498 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
7499 }
7500
7501 do {
7502 old.control = new.control = pi_desc->control;
7503
7504 WARN((pi_desc->sn == 1),
7505 "Warning: SN field of posted-interrupts "
7506 "is set before blocking\n");
7507
7508 /*
7509 * Since the vCPU can be preempted during this process,
7510 * vcpu->cpu could differ from pre_pcpu, so set pre_pcpu
7511 * as the destination of the wakeup notification event;
7512 * the wakeup handler can then find the right vCPU to
7513 * wake up if an interrupt arrives while the vCPU is in
7514 * the blocked state.
7515 */
7516 dest = cpu_physical_id(vcpu->pre_pcpu);
7517
7518 if (x2apic_enabled())
7519 new.ndst = dest;
7520 else
7521 new.ndst = (dest << 8) & 0xFF00;
7522
7523 /* set 'NV' to 'wakeup vector' */
7524 new.nv = POSTED_INTR_WAKEUP_VECTOR;
7525 } while (cmpxchg64(&pi_desc->control, old.control,
7526 new.control) != old.control);
7527
7528 /* We should not block the vCPU if an interrupt is posted for it. */
7529 if (pi_test_on(pi_desc) == 1)
7530 __pi_post_block(vcpu);
7531
7532 local_irq_enable();
7533 return (vcpu->pre_pcpu == -1);
7534}
7535
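
pi_pre_block() and __pi_post_block() above hand the notification vector back and forth: while the vCPU is runnable, NV is POSTED_INTR_VECTOR and interrupts are posted directly; while it is blocked, NV becomes POSTED_INTR_WAKEUP_VECTOR with NDST pointing at pre_pcpu, whose wakeup handler then kicks the vCPU. A condensed, hypothetical model of that hand-off (state only, no list handling or cmpxchg retry, and with a simplified return convention where true means it is safe to block):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative values; the real vectors are x86 arch constants. */
#define DEMO_POSTED_INTR_VECTOR		0xf2
#define DEMO_POSTED_INTR_WAKEUP_VECTOR	0xf1

struct demo_pi_state {
	uint8_t  nv;	/* notification vector */
	uint32_t ndst;	/* destination pCPU (APIC ID) */
	bool     on;	/* outstanding notification */
};

/* Entering the blocked state: reroute notifications to the wakeup vector
 * on the CPU the vCPU blocked on.  Returns false if an interrupt is
 * already pending, in which case blocking must be aborted. */
static bool demo_pre_block(struct demo_pi_state *pi, uint32_t pre_pcpu_apic_id)
{
	pi->ndst = pre_pcpu_apic_id;
	pi->nv = DEMO_POSTED_INTR_WAKEUP_VECTOR;
	return !pi->on;
}

/* Leaving the blocked state: restore normal posted delivery. */
static void demo_post_block(struct demo_pi_state *pi, uint32_t cur_pcpu_apic_id)
{
	pi->ndst = cur_pcpu_apic_id;
	pi->nv = DEMO_POSTED_INTR_VECTOR;
}

int main(void)
{
	struct demo_pi_state pi = { .nv = DEMO_POSTED_INTR_VECTOR };

	if (demo_pre_block(&pi, 4))		/* safe to block: NV = wakeup vector, NDST = 4 */
		demo_post_block(&pi, 5);	/* woken up, now running on pCPU 5 */
	return pi.nv == DEMO_POSTED_INTR_VECTOR ? 0 : 1;
}
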
7536static int vmx_pre_block(struct kvm_vcpu *vcpu)
7537{
7538 if (pi_pre_block(vcpu))
7539 return 1;
7540
7541 if (kvm_lapic_hv_timer_in_use(vcpu))
7542 kvm_lapic_switch_to_sw_timer(vcpu);
7543
7544 return 0;
7545}
7546
7331static int vmx_pre_block(struct kvm_vcpu *vcpu)
7332{
7333 if (pi_pre_block(vcpu))
7334 return 1;
7335
7336 if (kvm_lapic_hv_timer_in_use(vcpu))
7337 kvm_lapic_switch_to_sw_timer(vcpu);
7338
7339 return 0;
7340}
7341
7547static void pi_post_block(struct kvm_vcpu *vcpu)
7548{
7549 if (vcpu->pre_pcpu == -1)
7550 return;
7551
7552 WARN_ON(irqs_disabled());
7553 local_irq_disable();
7554 __pi_post_block(vcpu);
7555 local_irq_enable();
7556}
7557
7558static void vmx_post_block(struct kvm_vcpu *vcpu)
7559{
7560 if (kvm_x86_ops.set_hv_timer)
7561 kvm_lapic_switch_to_hv_timer(vcpu);
7562
7563 pi_post_block(vcpu);
7564}
7565
7342static void vmx_post_block(struct kvm_vcpu *vcpu)
7343{
7344 if (kvm_x86_ops.set_hv_timer)
7345 kvm_lapic_switch_to_hv_timer(vcpu);
7346
7347 pi_post_block(vcpu);
7348}
7349
7566/*
7567 * vmx_update_pi_irte - set IRTE for Posted-Interrupts
7568 *
7569 * @kvm: kvm
7570 * @host_irq: host irq of the interrupt
7571 * @guest_irq: gsi of the interrupt
7572 * @set: set or unset PI
7573 * returns 0 on success, < 0 on failure
7574 */
7575static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
7576 uint32_t guest_irq, bool set)
7577{
7578 struct kvm_kernel_irq_routing_entry *e;
7579 struct kvm_irq_routing_table *irq_rt;
7580 struct kvm_lapic_irq irq;
7581 struct kvm_vcpu *vcpu;
7582 struct vcpu_data vcpu_info;
7583 int idx, ret = 0;
7584
7585 if (!kvm_arch_has_assigned_device(kvm) ||
7586 !irq_remapping_cap(IRQ_POSTING_CAP) ||
7587 !kvm_vcpu_apicv_active(kvm->vcpus[0]))
7588 return 0;
7589
7590 idx = srcu_read_lock(&kvm->irq_srcu);
7591 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
7592 if (guest_irq >= irq_rt->nr_rt_entries ||
7593 hlist_empty(&irq_rt->map[guest_irq])) {
7594 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
7595 guest_irq, irq_rt->nr_rt_entries);
7596 goto out;
7597 }
7598
7599 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
7600 if (e->type != KVM_IRQ_ROUTING_MSI)
7601 continue;
7602 /*
7603 * VT-d PI cannot post multicast/broadcast interrupts to a
7604 * vCPU, so we keep using interrupt remapping for those
7605 * kinds of interrupts.
7606 *
7607 * For lowest-priority interrupts, we only support those
7608 * with a single CPU as the destination, e.g. the user
7609 * configures the interrupt via /proc/irq or uses
7610 * irqbalance to make the interrupt single-CPU.
7611 *
7612 * We will support full lowest-priority interrupts later.
7613 *
7614 * In addition, only generic (fixed-delivery) interrupts can be
7615 * injected via the PI mechanism; refuse to route others through it.
7616 */
7617
7618 kvm_set_msi_irq(kvm, e, &irq);
7619 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
7620 !kvm_irq_is_postable(&irq)) {
7621 /*
7622 * Make sure the IRTE is in remapped mode if
7623 * we don't handle it in posted mode.
7624 */
7625 ret = irq_set_vcpu_affinity(host_irq, NULL);
7626 if (ret < 0) {
7627 printk(KERN_INFO
7628 "failed to back to remapped mode, irq: %u\n",
7629 host_irq);
7630 goto out;
7631 }
7632
7633 continue;
7634 }
7635
7636 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
7637 vcpu_info.vector = irq.vector;
7638
7639 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
7640 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
7641
7642 if (set)
7643 ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
7644 else
7645 ret = irq_set_vcpu_affinity(host_irq, NULL);
7646
7647 if (ret < 0) {
7648 printk(KERN_INFO "%s: failed to update PI IRTE\n",
7649 __func__);
7650 goto out;
7651 }
7652 }
7653
7654 ret = 0;
7655out:
7656 srcu_read_unlock(&kvm->irq_srcu, idx);
7657 return ret;
7658}
7659
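
vmx_update_pi_irte() above flips a device MSI's interrupt-remapping entry between posted mode (pointing at the target vCPU's descriptor and guest vector) and plain remapped mode, using irq_set_vcpu_affinity() in both directions. A condensed sketch of just that call pair; the helper name is hypothetical and the header locations are from memory.

#include <linux/interrupt.h>		/* irq_set_vcpu_affinity() */
#include <asm/irq_remapping.h>		/* struct vcpu_data (pi_desc_addr, vector) */

/*
 * Hypothetical helper mirroring the set/unset branch above: point
 * @host_irq's remapping entry at a vCPU's posted-interrupt descriptor
 * (posted mode), or hand it back to ordinary remapped delivery.
 */
static int demo_set_posted_mode(unsigned int host_irq, u64 pi_desc_pa,
				u32 guest_vector, bool set)
{
	struct vcpu_data vcpu_info = {
		.pi_desc_addr = pi_desc_pa,	/* physical address of the pi_desc */
		.vector	      = guest_vector,	/* guest vector to post */
	};

	return irq_set_vcpu_affinity(host_irq, set ? &vcpu_info : NULL);
}
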
7660static void vmx_setup_mce(struct kvm_vcpu *vcpu)
7661{
7662 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
7663 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7664 FEAT_CTL_LMCE_ENABLED;
7665 else
7666 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7667 ~FEAT_CTL_LMCE_ENABLED;

--- 147 unchanged lines hidden ---

7815 .load_eoi_exitmap = vmx_load_eoi_exitmap,
7816 .apicv_post_state_restore = vmx_apicv_post_state_restore,
7817 .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
7818 .hwapic_irr_update = vmx_hwapic_irr_update,
7819 .hwapic_isr_update = vmx_hwapic_isr_update,
7820 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
7821 .sync_pir_to_irr = vmx_sync_pir_to_irr,
7822 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
7350static void vmx_setup_mce(struct kvm_vcpu *vcpu)
7351{
7352 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
7353 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7354 FEAT_CTL_LMCE_ENABLED;
7355 else
7356 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7357 ~FEAT_CTL_LMCE_ENABLED;

--- 147 unchanged lines hidden ---

7505 .load_eoi_exitmap = vmx_load_eoi_exitmap,
7506 .apicv_post_state_restore = vmx_apicv_post_state_restore,
7507 .check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
7508 .hwapic_irr_update = vmx_hwapic_irr_update,
7509 .hwapic_isr_update = vmx_hwapic_isr_update,
7510 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
7511 .sync_pir_to_irr = vmx_sync_pir_to_irr,
7512 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
7823 .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
7513 .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
7824
7825 .set_tss_addr = vmx_set_tss_addr,
7826 .set_identity_map_addr = vmx_set_identity_map_addr,
7827 .get_mt_mask = vmx_get_mt_mask,
7828
7829 .get_exit_info = vmx_get_exit_info,
7830
7831 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

--- 17 unchanged lines hidden (view full) ---

7849 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
7850
7851 .pre_block = vmx_pre_block,
7852 .post_block = vmx_post_block,
7853
7854 .pmu_ops = &intel_pmu_ops,
7855 .nested_ops = &vmx_nested_ops,
7856
7514
7515 .set_tss_addr = vmx_set_tss_addr,
7516 .set_identity_map_addr = vmx_set_identity_map_addr,
7517 .get_mt_mask = vmx_get_mt_mask,
7518
7519 .get_exit_info = vmx_get_exit_info,
7520
7521 .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

--- 17 unchanged lines hidden (view full) ---

7539 .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
7540
7541 .pre_block = vmx_pre_block,
7542 .post_block = vmx_post_block,
7543
7544 .pmu_ops = &intel_pmu_ops,
7545 .nested_ops = &vmx_nested_ops,
7546
7857 .update_pi_irte = vmx_update_pi_irte,
7547 .update_pi_irte = pi_update_irte,
7858
7859#ifdef CONFIG_X86_64
7860 .set_hv_timer = vmx_set_hv_timer,
7861 .cancel_hv_timer = vmx_cancel_hv_timer,
7862#endif
7863
7864 .setup_mce = vmx_setup_mce,
7865

--- 149 unchanged lines hidden ---

8015 }
8016
8017 if (!enable_preemption_timer) {
8018 vmx_x86_ops.set_hv_timer = NULL;
8019 vmx_x86_ops.cancel_hv_timer = NULL;
8020 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
8021 }
8022
7548
7549#ifdef CONFIG_X86_64
7550 .set_hv_timer = vmx_set_hv_timer,
7551 .cancel_hv_timer = vmx_cancel_hv_timer,
7552#endif
7553
7554 .setup_mce = vmx_setup_mce,
7555

--- 149 unchanged lines hidden ---

7705 }
7706
7707 if (!enable_preemption_timer) {
7708 vmx_x86_ops.set_hv_timer = NULL;
7709 vmx_x86_ops.cancel_hv_timer = NULL;
7710 vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
7711 }
7712
8023 kvm_set_posted_intr_wakeup_handler(wakeup_handler);
7713 kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
8024
8025 kvm_mce_cap_supported |= MCG_LMCE_P;
8026
8027 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
8028 return -EINVAL;
8029 if (!enable_ept || !cpu_has_vmx_intel_pt())
8030 pt_mode = PT_MODE_SYSTEM;
8031

--- 122 unchanged lines hidden ---

8154 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
8155 if (r) {
8156 vmx_exit();
8157 return r;
8158 }
8159
8160 for_each_possible_cpu(cpu) {
8161 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
7714
7715 kvm_mce_cap_supported |= MCG_LMCE_P;
7716
7717 if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
7718 return -EINVAL;
7719 if (!enable_ept || !cpu_has_vmx_intel_pt())
7720 pt_mode = PT_MODE_SYSTEM;
7721

--- 122 unchanged lines hidden ---

7844 r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
7845 if (r) {
7846 vmx_exit();
7847 return r;
7848 }
7849
7850 for_each_possible_cpu(cpu) {
7851 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8162 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
8163 spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7852
7853 pi_init(cpu);
8164 }
8165
8166#ifdef CONFIG_KEXEC_CORE
8167 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
8168 crash_vmclear_local_loaded_vmcss);
8169#endif
8170 vmx_check_vmcs12_offsets();
8171
8172 /*
8173 * Intel processors don't have problems with
8174 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
8175 * it for VMX by default
8176 */
8177 allow_smaller_maxphyaddr = true;
8178
8179 return 0;
8180}
8181module_init(vmx_init);
7854 }
7855
7856#ifdef CONFIG_KEXEC_CORE
7857 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
7858 crash_vmclear_local_loaded_vmcss);
7859#endif
7860 vmx_check_vmcs12_offsets();
7861
7862 /*
7863 * Intel processors don't have problems with
7864 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
7865 * it for VMX by default
7866 */
7867 allow_smaller_maxphyaddr = true;
7868
7869 return 0;
7870}
7871module_init(vmx_init);
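
On the new side of this diff the posted-interrupt code has disappeared from vmx.c, and the ops table and init path now reference pi_has_pending_interrupt(), pi_update_irte(), pi_wakeup_handler() and pi_init(), which suggests the functionality was split out into a dedicated posted-interrupt file. A hedged sketch of what the corresponding header might declare, inferred only from the call sites visible above; the file name and exact signatures are assumptions.

/* posted_intr.h (assumed): declarations inferred from the call sites in the
 * new version of vmx.c above; the path and exact prototypes are guesses. */
#ifndef __KVM_X86_VMX_POSTED_INTR_H
#define __KVM_X86_VMX_POSTED_INTR_H

#include <linux/types.h>

struct kvm;
struct kvm_vcpu;

void pi_init(int cpu);			/* per-CPU list/lock setup, vmx_init() loop */
void pi_wakeup_handler(void);		/* POSTED_INTERRUPT_WAKEUP_VECTOR callback */
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);	/* .dy_apicv_has_pending_interrupt */
int pi_update_irte(struct kvm *kvm, unsigned int host_irq,
		   uint32_t guest_irq, bool set);	/* .update_pi_irte */
int pi_pre_block(struct kvm_vcpu *vcpu);	/* called from vmx_pre_block() */
void pi_post_block(struct kvm_vcpu *vcpu);	/* called from vmx_post_block() */

#endif /* __KVM_X86_VMX_POSTED_INTR_H */
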