// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/kvm_types.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

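/*
 * apf_reason, steal_time and kvm_apic_eoi (below) are written by the
 * hypervisor. On SEV guests the host cannot write into encrypted memory,
 * so these are declared with DEFINE_PER_CPU_DECRYPTED and additionally
 * mapped decrypted for all possible CPUs in sev_map_percpu_data().
 */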
static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

static int has_guest_poll = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

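/*
 * Tasks waiting on an async #PF token are hashed by token into one of
 * KVM_TASK_SLEEP_HASHSIZE buckets, each with its own raw spinlock, so that
 * waits and wakeups on unrelated tokens do not contend on a single lock.
 */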
static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* A dummy entry exists -> the wakeup was delivered ahead of the #PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_FOR_KVM(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

static void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n, *dummy = NULL;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async #PF not yet handled, add a dummy entry for the token.
		 * Allocating the token must be done outside of the raw lock
		 * as the allocator is preemptible on PREEMPT_RT kernels.
		 */
		if (!dummy) {
			raw_spin_unlock(&b->lock);
			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);

			/*
			 * Continue looping on allocation failure, eventually
			 * the async #PF will be handled and allocating a new
			 * node will be unnecessary.
			 */
			if (!dummy)
				cpu_relax();

			/*
			 * Recheck for async #PF completion before enqueueing
			 * the dummy token to avoid duplicate list entries.
			 */
			goto again;
		}
		dummy->token = token;
		dummy->cpu = smp_processor_id();
		init_swait_queue_head(&dummy->wq);
		hlist_add_head(&dummy->link, &b->list);
		dummy = NULL;
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);

	/* A dummy token might be allocated and ultimately not used. */
	kfree(dummy);
}

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(async_pf_enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_FOR_KVM(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	apic_eoi();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(async_pf_enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

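	/*
	 * Skip the boot-time IO-APIC timer interrupt check; in a KVM guest
	 * the emulated timer wiring is fixed, so the check is presumably
	 * just wasted boot time (and unreliable under virtualization).
	 */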
#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrq(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
		 (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace __maybe_unused void kvm_guest_apic_eoi_write(void)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there's no need for a lock or memory barriers.
	 * An optimization barrier is implied in the APIC write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_native_eoi();
}

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrq(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrq(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(async_pf_enabled, true);
		pr_debug("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrq(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(async_pf_enabled))
		return;

	wrmsrq(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(async_pf_enabled, false);

	pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsrq(MSR_KVM_STEAL_TIME, 0);
}

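/*
 * Read the vCPU's accumulated steal time from the shared kvm_steal_time
 * record. The version field acts like a seqcount: the host makes it odd
 * before updating the record and even again afterwards, so retry while the
 * version is odd or has changed across the read.
 */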
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline __init void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that hotplugged
 * CPUs will have their per-cpu variables already mapped as decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (cc_vendor != CC_VENDOR_AMD ||
	    !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

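/*
 * Tear down all PV features on this CPU: stop steal time accounting and PV
 * EOI, tell the host that migration is no longer safe, and disable async #PF
 * so the host stops writing to the shared pages. Unless the whole machine is
 * going down, tasks still sleeping on an async #PF token are woken up here,
 * as the completion interrupt will never arrive once async #PF is off.
 */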
static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrq(MSR_KVM_PV_EOI_EN, 0);
	if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

static bool pv_ipi_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
		(num_possible_cpus() != 1));
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

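/*
 * Send IPIs to all CPUs in @mask with as few KVM_HC_SEND_IPI hypercalls as
 * possible. The hypercall takes a 128-bit bitmap of APIC IDs relative to a
 * base (@min), so destinations are batched into a sliding window of up to
 * KVM_IPI_CLUSTER_SIZE APIC IDs; whenever the next APIC ID falls outside the
 * current window, the accumulated bitmap is flushed via the hypercall and a
 * new window is started.
 */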
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	u32 apic_id, icr;
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

static int __init setup_efi_kvm_sev_migration(void)
{
	efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
	efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
	efi_status_t status;
	unsigned long size;
	bool enabled;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
	    !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		return 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
		pr_info("%s : EFI runtime services are not enabled\n", __func__);
		return 0;
	}

	size = sizeof(enabled);

	/* Get variable contents into buffer */
	status = efi.get_variable(efi_sev_live_migration_enabled,
				  &efi_variable_guid, NULL, &size, &enabled);

	if (status == EFI_NOT_FOUND) {
		pr_info("%s : EFI live migration variable not found\n", __func__);
		return 0;
	}

	if (status != EFI_SUCCESS) {
		pr_info("%s : EFI variable retrieval failed\n", __func__);
		return 0;
	}

	if (enabled == 0) {
		pr_info("%s: live migration disabled in EFI\n", __func__);
		return 0;
	}

	pr_info("%s : live migration enabled in EFI\n", __func__);
	wrmsrq(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);

	return 1;
}

late_initcall(setup_efi_kvm_sev_migration);

/*
 * Set the IPI entry points
 */
static __init void kvm_setup_pv_ipi(void)
{
	apic_update_callback(send_IPI_mask, kvm_send_ipi_mask);
	apic_update_callback(send_IPI_mask_allbutself, kvm_send_ipi_mask_allbutself);
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * Only flush vCPUs that are actually running. For preempted vCPUs,
	 * set KVM_VCPU_FLUSH_TLB so the host flushes the TLB when the vCPU
	 * is scheduled back in.
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so there is no need to
		 * explicitly skip it - it will never be cleared from
		 * flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

#endif

static int kvm_suspend(void *data)
{
	u64 val = 0;

	kvm_guest_cpu_offline(false);

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
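	/*
	 * Bit 0 of MSR_KVM_POLL_CONTROL set means the host polls on HLT; the
	 * guest haltpoll driver clears it while in use (see
	 * kvm_disable_host_haltpoll() below). Record whether guest haltpoll
	 * was active so that kvm_resume() can restore that state.
	 */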
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		rdmsrq(MSR_KVM_POLL_CONTROL, val);
	has_guest_poll = !(val & 1);
#endif
	return 0;
}

static void kvm_resume(void *data)
{
	kvm_cpu_online(raw_smp_processor_id());

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
		wrmsrq(MSR_KVM_POLL_CONTROL, 0);
#endif
}

static const struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static struct syscore kvm_syscore = {
	.ops	= &kvm_syscore_ops,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shutdown, this memory
 * won't be valid. In cases like kexec, in which you install a new kernel,
 * this means a random memory location would keep being written to.
 */
#ifdef CONFIG_CRASH_DUMP
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
bool __kvm_vcpu_is_preempted(long cpu);

__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring 8 64-bit
 * registers to/from the stack.
 */
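/*
 * The thunk loads the target CPU's per-cpu base from __per_cpu_offset and
 * tests steal_time.preempted directly, returning the result in %al without
 * clobbering anything beyond %rax.
 */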
#define PV_VCPU_PREEMPTED_ASM \
	"movq __per_cpu_offset(,%rdi,8), %rax\n\t" \
	"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
	"setne %al\n\t"

DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
		PV_VCPU_PREEMPTED_ASM, .text);
#endif

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);

		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_update_callback(eoi, kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_CRASH_DUMP
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore(&kvm_syscore);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return cpuid_base_hypervisor(KVM_SIGNATURE, 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{
	kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
			   KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

static void __init kvm_init_platform(void)
{
	u64 tolud = PFN_PHYS(e820__end_of_low_ram_pfn());
	/*
	 * Note, hardware requires variable MTRR ranges to be power-of-2 sized
	 * and naturally aligned. But when forcing guest MTRR state, Linux
	 * doesn't program the forced ranges into hardware. Don't bother doing
	 * the math to generate a technically-legal range.
	 */
	struct mtrr_var_range pci_hole = {
		.base_lo = tolud | X86_MEMTYPE_UC,
		.mask_lo = (u32)(~(SZ_4G - tolud - 1)) | MTRR_PHYSMASK_V,
		.mask_hi = (BIT_ULL(boot_cpu_data.x86_phys_bits) - 1) >> 32,
	};

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
		unsigned long nr_pages;
		int i;

		pv_ops.mmu.notify_page_enc_status_changed =
			kvm_sev_hc_page_enc_status;

		/*
		 * Reset the host's shared pages list related to kernel
		 * specific page encryption status settings before we load a
		 * new kernel by kexec. Reset the page encryption status
		 * during early boot instead of just before kexec to avoid SMP
		 * races during kvm_pv_guest_cpu_reboot().
		 * NOTE: We cannot reset the complete shared pages list
		 * here as we need to retain the UEFI/OVMF firmware
		 * specific settings.
		 */

		for (i = 0; i < e820_table->nr_entries; i++) {
			struct e820_entry *entry = &e820_table->entries[i];

			if (entry->type != E820_TYPE_RAM)
				continue;

			nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

			kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
					   nr_pages,
					   KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
		}

		/*
		 * Ensure that the _bss_decrypted section is marked as
		 * decrypted in the shared pages list.
		 */
		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
						__end_bss_decrypted - __start_bss_decrypted, 0);

		/*
		 * If not booted using EFI, enable Live migration support.
		 */
		if (!efi_enabled(EFI_BOOT))
			wrmsrq(MSR_KVM_MIGRATION_CONTROL,
			       KVM_MIGRATION_READY);
	}
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;

	/*
	 * Set WB as the default cache mode for SEV-SNP and TDX, with a single
	 * UC range for the legacy PCI hole, e.g. so that devices that expect
	 * to get UC/WC mappings don't get surprised with WB.
	 */
	guest_force_mtrr_state(&pci_hole, 1, MTRR_TYPE_WRBACK);
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	unsigned long flags = 0;
	u32 apicid;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * Halt until it's our turn and we get kicked. Note that we do a safe
	 * halt in the IRQs-enabled case to avoid hanging if the lock info is
	 * overwritten in the IRQ spinlock slowpath and no spurious interrupt
	 * occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	/*
	 * In case the host doesn't support KVM_FEATURE_PV_UNHALT there is
	 * still an advantage in keeping virt_spin_lock_key enabled:
	 * virt_spin_lock() is preferred over native qspinlock when the vCPU
	 * is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	/*
	 * When PV spinlocks are enabled and thus preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrq(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrq(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll re-enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif