/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>

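/*
 * Two boot parameters opt out of the paravirtual features below:
 * "no-kvmapf" leaves async page faults disabled, and "no-steal-acc"
 * keeps the steal clock readable but stops the scheduler from folding
 * steal time into its run-queue accounting.
 */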
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

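/*
 * Per-cpu areas shared with the host.  Their physical addresses are
 * handed over through MSR_KVM_ASYNC_PF_EN and MSR_KVM_STEAL_TIME, and
 * the host then writes fault reasons and accumulated steal time into
 * them; the documented ABI expects 64-byte aligned addresses, hence
 * the __aligned(64).
 */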
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

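/*
 * Tasks stopped by an async page fault sleep in one of these hash
 * buckets, keyed by the token the host attached to the fault.  The
 * later "page ready" notification carries the same token and is used
 * to look the sleeper up again.
 */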
struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
	struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

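/*
 * Called with interrupts disabled, straight from the #PF handler, when
 * the host reports "page not present".  Contexts that may schedule
 * sleep on the token's wait queue; idle or preemption-disabled contexts
 * must not schedule and instead halt until the wake-up arrives.
 */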
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);
	int cpu, idle;

	cpu = get_cpu();
	idle = idle_cpu(cpu);
	put_cpu();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> the wake-up was delivered ahead of the #PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.mm = current->active_mm;
	n.halted = idle || preempt_count() > 1;
	atomic_inc(&n.mm->mm_count);
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule, so halt until the
			 * wake-up arrives.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

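/*
 * Unhash one sleeper and kick it.  A NULL ->mm marks a dummy entry
 * left behind by kvm_async_pf_task_wake() when the wake-up raced ahead
 * of the fault, in which case there is nobody to wake.
 */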
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (!n->mm)
		return;
	mmdrop(n->mm);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

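/*
 * "Page ready" side of the protocol.  A token of ~0 is the host's
 * broadcast request to wake every sleeper on this CPU; otherwise wake
 * the task parked under the token, or leave a dummy entry behind if
 * the fault side has not run yet.
 */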
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * The async PF has not been handled yet.  Add a dummy
		 * entry for the token.
		 */
		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed!  Busy-wait while another
			 * cpu handles the async PF, then retry.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		n->mm = NULL;
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

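/*
 * Replaces the regular page fault handler on vector 14.  A reason of
 * zero means an ordinary fault and falls through to do_page_fault();
 * for the two KVM_PV_REASON_* notifications, CR2 carries the async PF
 * token rather than a faulting address.
 */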
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)read_cr2());
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

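/*
 * Tell the host where this CPU's steal time area lives: per the
 * documented MSR ABI, bits 63-6 of MSR_KVM_STEAL_TIME take the
 * 64-byte aligned physical address and bit 0 (KVM_MSR_ENABLED)
 * switches the reporting on.
 */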
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
		cpu, __pa(st));
}

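/*
 * Enable the paravirtual features on the current CPU.  The
 * KVM_ASYNC_PF_SEND_ALWAYS bit asks the host to deliver async page
 * faults even while the guest runs in kernel mode, which is only safe
 * on a preemptible kernel.
 */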
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void *unused)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

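/*
 * The host bumps ->version before and after updating ->steal, so an
 * odd version, or one that changed across the read, marks a torn
 * snapshot that must be retried: an open-coded seqcount read side.
 */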
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
#ifdef CONFIG_KVM_CLOCK
	WARN_ON(kvm_register_clock("primary cpu clock"));
#endif
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

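/*
 * Before a CPU goes down, its MSRs are cleared so the host stops
 * writing into the per-cpu shared areas, and any tasks still parked
 * on this CPU's tokens are woken.
 */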
static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	kvm_pv_disable_apf(NULL);
	apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

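/*
 * Main entry point, called early in boot (from setup_arch());
 * kvm_para_available() checks the hypervisor CPUID signature to
 * confirm we are actually running on KVM.
 */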
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

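/*
 * kvm_guest_init() runs too early for static key patching, so the
 * steal time keys are flipped later, from an arch_initcall.
 */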
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
453