/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
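/*
 * Async page fault (async PF) machinery: when the host has to page in memory
 * on behalf of the guest, it can notify the guest instead of stalling the
 * whole vCPU.  The faulting task parks itself in kvm_async_pf_task_wait()
 * under the token the host passed in CR2, and kvm_async_pf_task_wake() wakes
 * it once the host signals that the page is ready.  The async_pf_sleepers
 * hash above maps tokens to sleeping tasks; a "dummy" node records a wakeup
 * that arrives before the corresponding wait.
 */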
/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}
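/*
 * Wake the task sleeping on @token.  If the wakeup arrives before the fault
 * side had a chance to queue itself, a dummy node is inserted so that a
 * later kvm_async_pf_task_wait() returns immediately.  A token of ~0 is a
 * broadcast from the host and wakes every sleeper queued on this CPU.
 */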
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
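/*
 * Async PF events are delivered through the page fault vector: the #PF IDT
 * entry is rerouted to async_page_fault by kvm_apf_trap_init() below.  The
 * reason code comes from the per-CPU apf_reason area shared with the host,
 * and CR2 carries the async PF token rather than a faulting address.  When
 * no reason is set this is an ordinary page fault and we fall through to
 * do_page_fault().
 */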
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
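/*
 * steal_time.version works like a seqcount: the host increments it before
 * and after updating the record, so the value is odd while an update is in
 * flight.  Keep re-reading until we see an even version that did not change
 * around the read of the steal counter.
 */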
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
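/*
 * Send a multicast IPI with one KVM_HC_SEND_IPI hypercall per cluster of up
 * to KVM_IPI_CLUSTER_SIZE APIC IDs.  The bitmap is anchored at 'min'; bit
 * (apic_id - min) stands for that destination.  Whenever the next APIC ID
 * does not fit into the current [min, min + KVM_IPI_CLUSTER_SIZE) window,
 * the accumulated bitmap is flushed and a new cluster is started.  For
 * example, with 64-bit longs, APIC IDs 0..127 go out in a single hypercall
 * and APIC ID 128 opens a second one.
 */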
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask new_mask;
	const struct cpumask *local_mask;

	cpumask_copy(&new_mask, mask);
	cpumask_clear_cpu(this_cpu, &new_mask);
	local_mask = &new_mask;
	__send_ipi_mask(local_mask, vector);
}

static void kvm_send_ipi_allbutself(int vector)
{
	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void kvm_send_ipi_all(int vector)
{
	__send_ipi_mask(cpu_online_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
	apic->send_IPI_all = kvm_send_ipi_all;
	pr_info("KVM setup pv IPIs\n");
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
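/*
 * Paravirtual remote TLB flush: the host sets KVM_VCPU_PREEMPTED in a vCPU's
 * steal_time area while that vCPU is scheduled out.  Such a vCPU does not
 * need an IPI right now; instead we atomically set KVM_VCPU_FLUSH_TLB so the
 * host flushes its TLB when the vCPU runs again, and drop it from the mask.
 * Only vCPUs that are actually running receive the native flush IPI.
 */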
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);
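/*
 * Paravirtualized spinlocks: instead of burning cycles while a lock is
 * contended, a queued waiter halts its vCPU in kvm_wait() and the lock
 * holder wakes it with the KVM_HC_KICK_CPU hypercall via kvm_kick_cpu().
 * kvm_spinlock_init() wires this up when the host advertises
 * KVM_FEATURE_PV_UNHALT.
 */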
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq __per_cpu_offset(,%rdi,8), %rax;"
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */