// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/msr.h>

/* Enabled by default; cleared by the "hv_nopvspin" early parameter below. */
static bool hv_pvspin __initdata = true;

/* Wake the vCPU parked in hv_qlock_wait() by sending it an IPI. */
static void hv_qlock_kick(int cpu)
{
	__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long flags;

	/* Do not park the vCPU when called from NMI context. */
	if (in_nmi())
		return;

	/*
	 * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
	 * vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path it is required to
	 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
	 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
	 * the lock value check and the rdmsrq() then the vCPU might be put
	 * into 'idle' state by the hypervisor and kept in that state for
	 * an unspecified amount of time.
	 */
	local_irq_save(flags);
	/*
	 * Only issue the rdmsrq() when the lock state has not changed.
	 */
	if (READ_ONCE(*byte) == val) {
		unsigned long msr_val;

		rdmsrq(HV_X64_MSR_GUEST_IDLE, msr_val);

		/* The value is not used; the read itself parks the vCPU. */
		(void)msr_val;
	}
	local_irq_restore(flags);
}

/*
 * Hyper-V does not support this so far.
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}

PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

void __init hv_init_spinlocks(void)
{
	if (!hv_pvspin || !apic ||
	    !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
	    !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
		pr_info("PV spinlocks disabled\n");
		return;
	}
	pr_info("PV spinlocks enabled\n");

	/* Switch to the paravirt queued spinlock implementation. */
	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

/* Disable PV spinlocks via the "hv_nopvspin" kernel command line option. */
static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);