xref: /linux/arch/x86/kernel/kvm.c (revision c6ed444fd6fffaaf2e3857d926ed18bf3df81e8e)
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

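/*
 * Per-CPU regions shared with the hypervisor: apf_reason is written by the
 * host to report the async page fault reason, steal_time holds the stolen
 * time accounting. Both are cache-line aligned and, under SEV, must be
 * mapped decrypted so the host can access them.
 */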
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

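/*
 * Tasks waiting for an async "page ready" notification are tracked in a
 * small hash table keyed by the 32-bit token the host passed with the
 * "page not present" fault. Each bucket is protected by its own raw
 * spinlock so the wait and wake paths can run with interrupts disabled.
 */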
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

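/*
 * Called from the "page not present" async #PF path: park the current
 * context until the host delivers the matching "page ready" token. If the
 * context cannot schedule (idle task, nested preempt count, or an
 * interrupted kernel without CONFIG_PREEMPT_COUNT), halt with interrupts
 * enabled instead and wait for the wakeup IPI.
 */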
/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

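/*
 * Handle a "page ready" notification from the host. A token of ~0 means
 * "wake everything on this CPU". If the waiter has not parked itself yet
 * (the wakeup raced ahead of the #PF), leave a dummy node behind so
 * kvm_async_pf_task_wait() can detect the completed wakeup and return
 * immediately.
 */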
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

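/*
 * #PF entry point when async page faults are enabled. A reason of zero in
 * the shared apf_reason slot means this is an ordinary page fault and is
 * forwarded to do_page_fault(); otherwise CR2 holds the host-provided
 * token rather than a faulting address.
 */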
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

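/*
 * Tell the host where this CPU's steal-time structure lives by writing its
 * physical address, with KVM_MSR_ENABLED set, to MSR_KVM_STEAL_TIME. The
 * host then accounts stolen time into it across scheduling events.
 */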
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

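/*
 * Paravirtual EOI: when the host has set KVM_PV_EOI_BIT in the shared
 * kvm_apic_eoi word, clearing that bit is enough to acknowledge the
 * interrupt and the (potentially trapping) APIC EOI register write can
 * be skipped.
 */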
static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

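/*
 * Per-CPU guest setup: advertise the shared apf_reason, kvm_apic_eoi and
 * steal_time areas to the host by writing their physical addresses to the
 * corresponding KVM MSRs. Runs on each CPU as it comes online.
 */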
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

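/*
 * Read the stolen time for @cpu. The host bumps steal_time.version before
 * and after every update, so an odd value or a change across the two reads
 * means the snapshot was torn and must be retried (a seqcount-style
 * protocol over the shared structure).
 */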
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed to
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted in one pass.
 *
 * Note: we iterate through all possible CPUs so that CPUs hotplugged later
 * already have their per-CPU variables mapped as decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

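/*
 * Paravirtual remote TLB flush: IPI only the vCPUs that are actually
 * running. For preempted vCPUs, set KVM_VCPU_FLUSH_TLB in their shared
 * steal_time.preempted byte instead, and the host flushes their TLB before
 * the next guest entry.
 */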
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * Flush only the vCPUs that are currently running; for preempted
	 * vCPUs, queue the flush to happen when the host re-enters them.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

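/*
 * Late guest init: install the paravirt hooks for the features the host
 * advertises (async PF, steal time, PV TLB flush, PV EOI), register the
 * reboot notifier and the CPU hotplug callbacks, and disable the hard
 * lockup detector, which is unreliable in an overcommitted guest.
 */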
static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

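/*
 * KVM advertises itself through the hypervisor CPUID range (0x40000000 and
 * up) with the "KVMKVMKVM" signature. The detected base is cached so the
 * feature and hint leaves (base | KVM_CPUID_FEATURES) can be queried
 * cheaply.
 */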
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.init_platform	= kvmclock_init,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

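/*
 * Allocate the per-CPU scratch cpumasks used by kvm_flush_tlb_others() once
 * the allocators are up; the PV flush path is only wired up when the host
 * offers both PV_TLB_FLUSH and STEAL_TIME and does not hint REALTIME.
 */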
static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

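/*
 * pv-qspinlock wait hook: halt this vCPU until kvm_kick_cpu() wakes it or
 * the lock byte no longer holds the expected value. The value is re-checked
 * with interrupts disabled so a kick cannot slip in between the check and
 * the halt.
 */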
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it is our turn and we are kicked. Note that we do a safe
	 * halt when interrupts were enabled, to avoid hanging if the lock info
	 * is overwritten in an irq spinlock slowpath and no spurious interrupt
	 * arrives to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64, to avoid saving and restoring eight
 * 64-bit registers to and from the stack.
 */
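/*
 * The callee-save thunk receives the CPU number in %rdi and reads that
 * CPU's steal_time.preempted byte directly via __per_cpu_offset, returning
 * the result in %al.
 */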
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */