xref: /linux/drivers/hv/hv.c (revision 47679cde604d6977b390d5b0fc83dedf8a82f66d)
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,
};

#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
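/*
 * The Hyper-V reference time and synthetic timers count in 100ns units,
 * i.e. at 10 MHz.  The maximum programmable delta is therefore
 * 0xffffffff ticks * 100ns/tick, roughly 429 seconds.
 */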

/*
 * query_hypervisor_info - Get version info of the Windows hypervisor
 */
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;

static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * It's assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		host_info_eax = eax;
		host_info_ebx = ebx;
		host_info_ecx = ecx;
		host_info_edx = edx;
	}
	return max_leaf;
}

/*
 * hv_do_hypercall - Invoke the specified hypercall
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

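	/*
	 * 64-bit hypercall calling convention (per the Hyper-V TLFS): the
	 * control code goes in RCX, the input page GPA in RDX and the
	 * output page GPA in R8; the status comes back in RAX.  R8 has no
	 * plain inline-asm constraint letter, hence the separate mov below.
	 */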
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

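	/*
	 * 32-bit hypercall calling convention (per the Hyper-V TLFS): the
	 * 64-bit control code, input GPA and output GPA are passed in
	 * EDX:EAX, EBX:ECX and EDI:ESI respectively; the 64-bit status
	 * comes back in EDX:EAX.
	 */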
	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);

#ifdef CONFIG_X86_64
static cycle_t read_hv_clock_tsc(struct clocksource *arg)
{
	cycle_t current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

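		/*
		 * Seqlock-style read: snapshot the sequence number, read
		 * scale and offset, then retry if the hypervisor updated
		 * the page underneath us.  A sequence of 0 means the page
		 * is invalid and the MSR must be used instead.
		 */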
		while (1) {
			cycle_t tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc * scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fall back to the MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
		.name           = "hyperv_clocksource_tsc_page",
		.rating         = 425,
		.read           = read_hv_clock_tsc,
		.mask           = CLOCKSOURCE_MASK(64),
		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif


/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);

	max_leaf = query_hypervisor_info();

	/*
	 * Write our OS ID.
	 */
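	/*
	 * generate_guest_id() (asm/mshyperv.h) packs a Linux vendor ID and
	 * LINUX_VERSION_CODE into the layout HV_X64_MSR_GUEST_OS_ID
	 * expects; the two zero arguments are the optional distro-specific
	 * info fields.
	 */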
	hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that the hypercall page was set up. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
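		/*
		 * The reference counter ticks at 10 MHz (100ns units),
		 * hence NSEC_PER_SEC / 100 as the registration frequency.
		 */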
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}

/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is normally called during driver unloading or exiting.
 */
void hv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}

#ifdef CONFIG_X86_64
	/*
	 * Clean up the TSC-page-based clocksource.
	 */
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		/*
		 * A crash can happen in interrupt context, where
		 * unregistering a clocksource is impossible and redundant.
		 */
		if (!oops_in_progress) {
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		vfree(hv_context.tsc_page);
		hv_context.tsc_page = NULL;
	}
#endif
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

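	/*
	 * The hypercall input buffer must not cross a page boundary, so
	 * use this CPU's pre-allocated, page-aligned buffer.  get_cpu()
	 * disables preemption so the buffer cannot be handed to another
	 * task until put_cpu() below.
	 */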
	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
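	/* The HV_STATUS code lives in the low 16 bits of the result. */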
	return status & 0xFFFF;
}

static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	cycle_t current_tick;

	WARN_ON(!clockevent_state_oneshot(evt));

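	/*
	 * STIMER0 is programmed with an absolute expiration time, so
	 * convert the relative delta into "now + delta" in reference
	 * ticks before writing the count MSR.
	 */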
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	current_tick += delta;
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
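	/* Clearing the count and config MSRs disarms STIMER0. */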
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}

static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_timer_config timer_cfg;

	/* Start from zero so no stale stack bits reach the MSR. */
	timer_cfg.as_uint64 = 0;
	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

	return 0;
}

static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
	dev->name = "Hyper-V clockevent";
	dev->features = CLOCK_EVT_FEAT_ONESHOT;
	dev->cpumask = cpumask_of(cpu);
	dev->rating = 1000;
	/*
	 * We deliberately avoid setting dev->owner = THIS_MODULE: doing so
	 * would make clockevents_config_and_register() take additional
	 * references on the hv_vmbus module, making it impossible to unload.
	 */

	dev->set_state_shutdown = hv_ce_shutdown;
	dev->set_state_oneshot = hv_ce_set_oneshot;
	dev->set_next_event = hv_ce_set_next_event;
}


int hv_synic_alloc(void)
{
	size_t size = sizeof(struct tasklet_struct);
	size_t ced_size = sizeof(struct clock_event_device);
	int cpu;

	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
					 GFP_ATOMIC);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_online_cpu(cpu) {
		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.event_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
		if (hv_context.clk_evt[cpu] == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}

		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

		hv_context.synic_message_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_message_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_event_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_context.post_msg_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.post_msg_page[cpu] == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}
	}

	return 0;
err:
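	/*
	 * Partial allocations are not rolled back here; the caller is
	 * expected to invoke hv_synic_free() on failure.
	 */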
	return -ENOMEM;
}

static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}

void hv_synic_free(void)
{
	int cpu;

	kfree(hv_context.hv_numa_map);
	for_each_online_cpu(cpu)
		hv_synic_free_cpu(cpu);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., an x2v shim), we
 * need to retrieve the initialized message and event pages.  Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_init(void *arg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;

	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	/* Set up the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Set up the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Set up the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;

	/*
	 * Set up the mapping between Hyper-V's notion of CPU ID and the
	 * Linux notion.  This array will be indexed by the Linux CPU ID.
	 */
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

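	/*
	 * Per-cpu list on which the vmbus code tracks the channels that
	 * are bound to (i.e., serviced on) this CPU.
	 */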
	INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
}

/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
 */
void hv_synic_clockevents_cleanup(void)
{
	int cpu;

	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
		return;

	for_each_online_cpu(cpu)
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		hv_ce_shutdown(hv_context.clk_evt[cpu]);

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* FIXME: clean this up correctly in the SMP case. */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
}
613