xref: /linux/arch/x86/kernel/kvm.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

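/*
 * Per-cpu areas shared with the host. The 64-byte alignment matters:
 * the registration MSRs below reuse the low bits of the physical address
 * for enable/flag bits, so the areas themselves must be 64-byte aligned.
 */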
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

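/*
 * Async page fault bookkeeping: a task that faults on a page the host has
 * paged out goes to sleep on a kvm_task_sleep_node keyed by the host's
 * token, hashed into one of KVM_TASK_SLEEP_HASHSIZE buckets.
 */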
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

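/*
 * Sleep until the host signals that the page for @token is resident again.
 *
 * If the PAGE_READY wakeup raced ahead of us, a dummy node carrying our
 * token already sits in the bucket and we return at once. Contexts that
 * must not schedule (the idle task, or preempt_count() > 1) halt instead
 * and are kicked out of halt by the reschedule IPI sent from
 * apf_task_wake_one().
 */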
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule, so halt until the wakeup
			 * IPI arrives.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

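/* Wake one sleeper: halted waiters get a reschedule IPI, the rest a wake_up(). */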
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

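/*
 * Handle a PAGE_READY notification for @token. A token of ~0 tells us to
 * wake every sleeper on this cpu. If no matching sleeper exists yet, the
 * wakeup outran the fault: leave a dummy node behind for
 * kvm_async_pf_task_wait() to find.
 */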
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

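/*
 * Fetch and clear the reason field of the shared apf_reason slot. The host
 * only writes it in this vcpu's context, so no atomics are needed beyond
 * the per-cpu access.
 */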
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

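/*
 * Paravirtual #PF entry point, installed over vector 14 by
 * kvm_apf_trap_init(). A reason of zero means an ordinary page fault;
 * for the async cases, CR2 carries the host's token instead of a
 * faulting address.
 */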
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		rcu_irq_exit();
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

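/*
 * Hook the paravirt ops the host improves on: the port-0x80 IO delay can
 * be a NOP, and the IO-APIC timer check is skipped, since timer IRQ
 * delivery checks are unreliable under virtualization.
 */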
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

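/*
 * Hand the host the physical address of this cpu's kvm_steal_time record
 * (low bit = enable) via MSR_KVM_STEAL_TIME; the host accumulates steal
 * time there for kvm_steal_clock() to read.
 */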
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
		cpu, __pa(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

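/*
 * PV EOI: when the host sets KVM_PV_EOI_BIT in kvm_apic_eoi, clearing the
 * bit is enough to acknowledge the interrupt and the trapping APIC_EOI
 * write can be skipped.
 */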
static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for locks or memory barriers.
	 * An optimization barrier is implied in apic_write().
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

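/*
 * Per-cpu registration of the shared areas above. Runs on each cpu as it
 * is brought up (see kvm_guest_init() and the SMP hooks below).
 */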
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied, but make it explicit anyway. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec, since
	 * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
	 * The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

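/*
 * Read the steal time the host has accumulated for @cpu.
 *
 * src->version acts like a seqcount: the host bumps it before and after
 * updating the record, so an odd value, or one that changes between our
 * two reads, means we raced with an update and must retry. The writer
 * side lives in the hypervisor; as a rough illustration it does:
 *
 *	version++;	(now odd: update in progress)
 *	smp_wmb();
 *	steal += delta;
 *	smp_wmb();
 *	version++;	(even again: record stable)
 */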
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

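/* Route #PF (vector 14) to the async_page_fault stub instead of page_fault. */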
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

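/*
 * Main guest-side setup: wire up a paravirt hook for each feature the
 * host advertises through the KVM CPUID feature bits.
 */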
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static bool __init kvm_detect(void)
{
	return kvm_para_available();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

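/*
 * Jump labels cannot be toggled this early in boot, so flip the steal-time
 * static keys from an initcall; has_steal_clock was latched in
 * kvm_guest_init().
 */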
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
514