xref: /linux/arch/x86/kernel/cpu/vmware.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>

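/*
 * CPUID_VMWARE_INFO_LEAF is the standard hypervisor CPUID leaf (0x40000000),
 * which carries the vendor signature.  The backdoor magic value is ASCII
 * "VMXh" and the backdoor I/O port number is ASCII "VX".
 */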
#define CPUID_VMWARE_INFO_LEAF	0x40000000
#define VMWARE_HYPERVISOR_MAGIC	0x564D5868
#define VMWARE_HYPERVISOR_PORT	0x5658

#define VMWARE_PORT_CMD_GETVERSION	10
#define VMWARE_PORT_CMD_GETHZ		45
#define VMWARE_PORT_CMD_GETVCPU_INFO	68
#define VMWARE_PORT_CMD_LEGACY_X2APIC	3
#define VMWARE_PORT_CMD_VCPU_RESERVED	31

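/*
 * Issue a backdoor call: EAX holds the magic value, ECX the command number,
 * DX the backdoor port and EBX is preloaded with -1.  The hypervisor
 * intercepts the "inl" and returns its results in the same registers.
 */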
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
	__asm__("inl (%%dx)" :						\
			"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :	\
			"0"(VMWARE_HYPERVISOR_MAGIC),			\
			"1"(VMWARE_PORT_CMD_##cmd),			\
			"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) :	\
			"memory");

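/*
 * A VMware hypervisor answers GETVERSION with the backdoor magic in EBX;
 * an EAX of -1 indicates that the call was not handled.
 */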
static inline int __vmware_platform(void)
{
	uint32_t eax, ebx, ecx, edx;
	VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
	return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
}

static unsigned long vmware_get_tsc_khz(void)
{
	uint64_t tsc_hz, lpj;
	uint32_t eax, ebx, ecx, edx;

	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);

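	/*
	 * GETHZ reports the TSC frequency in Hz as a 64-bit value split
	 * across EBX (high 32 bits) and EAX (low 32 bits); convert it to kHz.
	 */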
	tsc_hz = eax | (((uint64_t)ebx) << 32);
	do_div(tsc_hz, 1000);
	BUG_ON(tsc_hz >> 32);
	printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
			 (unsigned long) tsc_hz / 1000,
			 (unsigned long) tsc_hz % 1000);

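	/*
	 * Preset loops_per_jiffy from the TSC rate so that calibrate_delay()
	 * can skip the delay-loop calibration: lpj = tsc_khz * 1000 / HZ.
	 */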
	if (!preset_lpj) {
		lpj = ((u64)tsc_hz * 1000);
		do_div(lpj, HZ);
		preset_lpj = lpj;
	}

	return tsc_hz;
}

static void __init vmware_platform_setup(void)
{
	uint32_t eax, ebx, ecx, edx;

	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);

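	/*
	 * An EBX of -1 means GETHZ is not supported, so only install the
	 * TSC calibration hook when a frequency is actually reported.
	 */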
	if (ebx != UINT_MAX)
		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
	else
		printk(KERN_WARNING
		       "Failed to get TSC freq from the hypervisor\n");
}

/*
 * When checking the DMI string information, checking just the product
 * serial number should be enough, as it always contains a VMware-specific
 * string when running under the VMware hypervisor.
 */
static uint32_t __init vmware_platform(void)
{
	if (cpu_has_hypervisor) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

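		/*
		 * The hypervisor CPUID leaf returns the vendor signature in
		 * EBX, ECX and EDX; VMware reports the 12-byte string
		 * "VMwareVMware".
		 */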
		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
			return CPUID_VMWARE_INFO_LEAF;
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform())
		return 1;

	return 0;
}

/*
 * The VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing differences when running on virtual CPUs, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between the vCPUs' TSCs (even
 * though the TSCs do not drift from each other).  Also, the ACPI PM timer
 * clocksource is not suitable as a watchdog when running on a hypervisor
 * because the kernel may miss a wrap of the counter if the vCPU is
 * descheduled for a long time. To skip these checks at runtime we set these
 * capability bits, so that the kernel can just trust the hypervisor to
 * provide a reliable virtual TSC that is suitable for timekeeping.
 */
static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
}

/* Checks if the hypervisor supports x2apic without VT-d interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
	uint32_t eax, ebx, ecx, edx;
	VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
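	/*
	 * Bit VCPU_RESERVED set in EAX means the GETVCPU_INFO contents are
	 * not valid; bit LEGACY_X2APIC indicates that x2APIC may be used
	 * without interrupt remapping.
	 */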
	return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
	       (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
}

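/*
 * Hooks for the generic hypervisor detection code (see
 * arch/x86/kernel/cpu/hypervisor.c), which calls ->detect at boot and, when
 * VMware is identified, uses the callbacks below for platform setup.
 */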
const __refconst struct hypervisor_x86 x86_hyper_vmware = {
	.name			= "VMware",
	.detect			= vmware_platform,
	.set_cpu_features	= vmware_set_cpu_features,
	.init_platform		= vmware_platform_setup,
	.x2apic_available	= vmware_legacy_x2apic_available,
};
EXPORT_SYMBOL(x86_hyper_vmware);