Lines matching +full:retain +full:state +full:shutdown

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 #define pr_fmt(fmt) "kvm-guest: " fmt
60 early_param("no-kvmapf", parse_no_kvmapf);
69 early_param("no-steal-acc", parse_no_stealacc);
104 hlist_for_each(p, &b->list) { in _find_apf_task()
107 if (n->token == token) in _find_apf_task()
120 raw_spin_lock(&b->lock); in kvm_async_pf_queue_task()
123 /* dummy entry exist -> wake up was delivered ahead of PF */ in kvm_async_pf_queue_task()
124 hlist_del(&e->link); in kvm_async_pf_queue_task()
125 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
130 n->token = token; in kvm_async_pf_queue_task()
131 n->cpu = smp_processor_id(); in kvm_async_pf_queue_task()
132 init_swait_queue_head(&n->wq); in kvm_async_pf_queue_task()
133 hlist_add_head(&n->link, &b->list); in kvm_async_pf_queue_task()
134 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
139 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
170 hlist_del_init(&n->link); in apf_task_wake_one()
171 if (swq_has_sleeper(&n->wq)) in apf_task_wake_one()
172 swake_up_one(&n->wq); in apf_task_wake_one()
184 raw_spin_lock(&b->lock); in apf_task_wake_all()
185 hlist_for_each_safe(p, next, &b->list) { in apf_task_wake_all()
187 if (n->cpu == smp_processor_id()) in apf_task_wake_all()
190 raw_spin_unlock(&b->lock); in apf_task_wake_all()
206 raw_spin_lock(&b->lock); in kvm_async_pf_task_wake()
215 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
232 dummy->token = token; in kvm_async_pf_task_wake()
233 dummy->cpu = smp_processor_id(); in kvm_async_pf_task_wake()
234 init_swait_queue_head(&dummy->wq); in kvm_async_pf_task_wake()
235 hlist_add_head(&dummy->link, &b->list); in kvm_async_pf_task_wake()
240 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
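/*
 * Illustrative user-space sketch (not kernel code) of the async-PF token
 * bucket that kvm_async_pf_queue_task() and kvm_async_pf_task_wake() above
 * share: the faulting side queues a waiter keyed by token, and the wake side
 * either wakes that waiter or, if it arrived first, leaves a "dummy" entry so
 * the fault path knows the wakeup was already delivered and skips sleeping.
 * All names here (apf_node, apf_bucket, apf_queue_task, apf_task_wake) are
 * invented for the example; error handling is elided.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct apf_node {
	struct apf_node *next;
	uint32_t token;
	bool dummy;			/* wake arrived before the fault */
	pthread_cond_t wq;		/* stand-in for the swait queue */
};

struct apf_bucket {
	pthread_mutex_t lock;
	struct apf_node *head;
};

static struct apf_node **apf_find(struct apf_bucket *b, uint32_t token)
{
	struct apf_node **pp;

	for (pp = &b->head; *pp; pp = &(*pp)->next)
		if ((*pp)->token == token)
			return pp;
	return NULL;
}

/* Fault side: returns true if the caller still needs to sleep on n->wq. */
static bool apf_queue_task(struct apf_bucket *b, struct apf_node *n,
			   uint32_t token)
{
	struct apf_node **pp;

	pthread_mutex_lock(&b->lock);
	pp = apf_find(b, token);
	if (pp && (*pp)->dummy) {
		struct apf_node *e = *pp;

		*pp = e->next;			/* consume the dummy entry */
		pthread_mutex_unlock(&b->lock);
		free(e);
		return false;			/* wakeup was delivered ahead of the fault */
	}
	n->token = token;
	n->dummy = false;
	pthread_cond_init(&n->wq, NULL);
	n->next = b->head;
	b->head = n;
	pthread_mutex_unlock(&b->lock);
	return true;
}

/* Wake side: wake the waiter, or leave a dummy if none is queued yet. */
static void apf_task_wake(struct apf_bucket *b, uint32_t token)
{
	struct apf_node **pp;

	pthread_mutex_lock(&b->lock);
	pp = apf_find(b, token);
	if (pp) {
		if (!(*pp)->dummy)
			pthread_cond_signal(&(*pp)->wq);
	} else {
		struct apf_node *dummy = calloc(1, sizeof(*dummy));

		if (dummy) {			/* allocation-failure handling elided */
			dummy->token = token;
			dummy->dummy = true;
			dummy->next = b->head;
			b->head = dummy;
		}
	}
	pthread_mutex_unlock(&b->lock);
}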
262 irqentry_state_t state; in __kvm_handle_async_pf() local
267 state = irqentry_enter(regs); in __kvm_handle_async_pf()
275 if (unlikely(!(regs->flags & X86_EFLAGS_IF))) in __kvm_handle_async_pf()
288 irqentry_exit(regs, state); in __kvm_handle_async_pf()
414 version = src->version; in kvm_steal_clock()
416 steal = src->steal; in kvm_steal_clock()
418 } while ((version & 1) || (version != src->version)); in kvm_steal_clock()
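/*
 * Illustrative sketch (not kernel code) of the version/retry protocol the
 * kvm_steal_clock() loop above relies on: the writer bumps ->version to an
 * odd value before updating ->steal and to an even value afterwards, so a
 * reader retries whenever it sees an odd version or the version changed
 * under it.  C11 atomics and fences stand in for virt_rmb(); the names are
 * invented for the example.
 */
#include <stdatomic.h>
#include <stdint.h>

struct steal_time_sketch {
	_Atomic uint32_t version;
	_Atomic uint64_t steal;
};

static uint64_t read_steal(const struct steal_time_sketch *st)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = atomic_load_explicit(&st->version, memory_order_acquire);
		steal = atomic_load_explicit(&st->steal, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
	} while ((version & 1) ||
		 version != atomic_load_explicit(&st->version, memory_order_relaxed));

	return steal;
}

/* Writer side (played by the host in the real protocol). */
static void write_steal(struct steal_time_sketch *st, uint64_t steal)
{
	uint32_t v = atomic_load_explicit(&st->version, memory_order_relaxed);

	atomic_store_explicit(&st->version, v + 1, memory_order_relaxed);	/* odd: update in progress */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&st->steal, steal, memory_order_relaxed);
	atomic_store_explicit(&st->version, v + 2, memory_order_release);	/* even: consistent again */
}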
433 * hotplugged will have their per-cpu variable already mapped as
451 static void kvm_guest_cpu_offline(bool shutdown) in kvm_guest_cpu_offline() argument
459 if (!shutdown) in kvm_guest_cpu_offline()
534 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { in __send_ipi_mask()
535 ipi_bitmap <<= min - apic_id; in __send_ipi_mask()
542 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
547 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); in __send_ipi_mask()
553 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
654 u8 state; in kvm_flush_tlb_multi() local
662 * queue flush_on_enter for pre-empted vCPUs in kvm_flush_tlb_multi()
667 * skip check for local vCPU - it will never be cleared from in kvm_flush_tlb_multi()
671 state = READ_ONCE(src->preempted); in kvm_flush_tlb_multi()
672 if ((state & KVM_VCPU_PREEMPTED)) { in kvm_flush_tlb_multi()
673 if (try_cmpxchg(&src->preempted, &state, in kvm_flush_tlb_multi()
674 state | KVM_VCPU_FLUSH_TLB)) in kvm_flush_tlb_multi()
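/*
 * Illustrative sketch (not kernel code) of the try_cmpxchg() step in
 * kvm_flush_tlb_multi() above: the TLB flush for a preempted vCPU is handed
 * off to the hypervisor by setting a FLUSH flag, but only if the PREEMPTED
 * flag is still set at the moment of the compare-exchange; if the vCPU woke
 * up in the meantime, the exchange fails and the normal IPI path is used.
 * Flag values and names are invented for the example.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define VCPU_PREEMPTED	0x01
#define VCPU_FLUSH_TLB	0x02

struct vcpu_state_sketch {
	_Atomic uint8_t preempted;	/* shared with the "hypervisor" */
};

/* Returns true if the flush was delegated and no IPI is needed. */
static bool defer_flush_if_preempted(struct vcpu_state_sketch *v)
{
	uint8_t state = atomic_load_explicit(&v->preempted, memory_order_relaxed);

	if (!(state & VCPU_PREEMPTED))
		return false;		/* vCPU is running: send the IPI */

	/* Set FLUSH_TLB only if PREEMPTED is still set at exchange time. */
	return atomic_compare_exchange_strong(&v->preempted, &state,
					      (uint8_t)(state | VCPU_FLUSH_TLB));
}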
702 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() in kvm_smp_prepare_boot_cpu()
776 * registered memory location. If the guest happens to shutdown, this memory
795 return !!(src->preempted & KVM_VCPU_PREEMPTED); in __kvm_vcpu_is_preempted()
801 #include <asm/asm-offsets.h>
806 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
889 static int kvm_cpuid_base = -1; in kvm_cpuid_base()
891 if (kvm_cpuid_base == -1) in kvm_cpuid_base()
942 * Note, hardware requires variable MTRR ranges to be power-of-2 sized in kvm_init_platform()
943 * and naturally aligned. But when forcing guest MTRR state, Linux in kvm_init_platform()
945 * the math to generate a technically-legal range. in kvm_init_platform()
949 .mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V, in kvm_init_platform()
950 .mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32, in kvm_init_platform()
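/*
 * Illustrative sketch (not kernel code) of the mask arithmetic in the MTRR
 * fragment above: a variable MTRR covers [base, base + size) via
 * mask = ~(size - 1), which is only architecturally valid when size is a
 * power of two and base is size-aligned.  Here "size" is 4G - tolud (the PCI
 * hole below 4G), so the computed mask is generally not a legal power-of-2
 * range -- which is fine, since the forced guest MTRR state is never
 * programmed into hardware.  SZ_4G and MTRR_PHYSMASK_V mirror the kernel
 * values; the function name is invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4G		(1ULL << 32)
#define MTRR_PHYSMASK_V	(1U << 11)	/* "valid" bit in IA32_MTRR_PHYSMASK */

static void pci_hole_mask(uint64_t tolud, unsigned int phys_bits)
{
	uint64_t size = SZ_4G - tolud;
	uint32_t mask_lo = (uint32_t)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V;
	uint32_t mask_hi = (uint32_t)(((1ULL << phys_bits) - 1) >> 32);
	bool legal = (size & (size - 1)) == 0 && (tolud & (size - 1)) == 0;

	printf("tolud=%#llx size=%#llx mask=%#x:%#x power-of-2 range: %s\n",
	       (unsigned long long)tolud, (unsigned long long)size,
	       mask_hi, mask_lo, legal ? "yes" : "no");
}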
968 * here as we need to retain the UEFI/OVMF firmware in kvm_init_platform()
972 for (i = 0; i < e820_table->nr_entries; i++) { in kvm_init_platform()
973 struct e820_entry *entry = &e820_table->entries[i]; in kvm_init_platform()
975 if (entry->type != E820_TYPE_RAM) in kvm_init_platform()
978 nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); in kvm_init_platform()
980 kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, in kvm_init_platform()
990 __end_bss_decrypted - __start_bss_decrypted, 0); in kvm_init_platform()
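/*
 * Illustrative user-space sketch (not kernel code) of the e820 walk above:
 * iterate the firmware memory map, skip anything that is not RAM, round the
 * region size up to whole pages, and notify the hypervisor about each range.
 * The map layout, hypercall stub, and attribute value are invented for the
 * example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096ULL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

enum region_type { REGION_RAM, REGION_RESERVED };

struct mem_region {
	uint64_t addr;
	uint64_t size;
	enum region_type type;
};

/* Stand-in for kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, ...). */
static void map_gpa_range(uint64_t gpa, uint64_t nr_pages, uint64_t attrs)
{
	printf("map gpa %#llx, %llu pages, attrs %#llx\n",
	       (unsigned long long)gpa, (unsigned long long)nr_pages,
	       (unsigned long long)attrs);
}

static void mark_ram_ranges(const struct mem_region *map, size_t n,
			    uint64_t attrs)
{
	for (size_t i = 0; i < n; i++) {
		if (map[i].type != REGION_RAM)
			continue;
		map_gpa_range(map[i].addr,
			      DIV_ROUND_UP(map[i].size, PAGE_SIZE), attrs);
	}
}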
1003 * Set WB as the default cache mode for SEV-SNP and TDX, with a single in kvm_init_platform()
1014 ghcb_set_rbx(ghcb, regs->bx); in kvm_sev_es_hcall_prepare()
1015 ghcb_set_rcx(ghcb, regs->cx); in kvm_sev_es_hcall_prepare()
1016 ghcb_set_rdx(ghcb, regs->dx); in kvm_sev_es_hcall_prepare()
1017 ghcb_set_rsi(ghcb, regs->si); in kvm_sev_es_hcall_prepare()
1022 /* No checking of the return state needed */ in kvm_sev_es_hcall_finish()
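/*
 * Illustrative sketch (not kernel code) of what the ghcb_set_*() calls above
 * accomplish for an SEV-ES hypercall: the argument registers are copied into
 * the shared GHCB page and each field is marked present in a valid bitmap so
 * the hypervisor knows which register state to consume.  The structure
 * layout, field numbering, and names are simplified/invented for the example.
 */
#include <stdint.h>
#include <string.h>

struct ghcb_sketch {
	uint64_t rbx, rcx, rdx, rsi;
	uint8_t valid_bitmap[16];	/* one bit per save-area field */
};

enum ghcb_field { FIELD_RBX, FIELD_RCX, FIELD_RDX, FIELD_RSI };

static void ghcb_mark_valid(struct ghcb_sketch *g, enum ghcb_field f)
{
	g->valid_bitmap[f / 8] |= 1u << (f % 8);
}

static void hcall_prepare(struct ghcb_sketch *g,
			  uint64_t bx, uint64_t cx, uint64_t dx, uint64_t si)
{
	memset(g->valid_bitmap, 0, sizeof(g->valid_bitmap));
	g->rbx = bx; ghcb_mark_valid(g, FIELD_RBX);
	g->rcx = cx; ghcb_mark_valid(g, FIELD_RCX);
	g->rdx = dx; ghcb_mark_valid(g, FIELD_RDX);
	g->rsi = si; ghcb_mark_valid(g, FIELD_RSI);
}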