/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
#include <asm/svm.h>

#undef pr_fmt
#define pr_fmt(fmt)	"vmware: " fmt

#define CPUID_VMWARE_INFO_LEAF		0x40000000
#define CPUID_VMWARE_FEATURES_LEAF	0x40000010

#define GETVCPU_INFO_LEGACY_X2APIC	BIT(3)
#define GETVCPU_INFO_VCPU_RESERVED	BIT(31)

#define STEALCLOCK_NOT_AVAILABLE	(-1)
#define STEALCLOCK_DISABLED		0
#define STEALCLOCK_ENABLED		1

struct vmware_steal_time {
	union {
		u64 clock;	/* stolen time counter in units of vtsc */
		struct {
			/* only for little-endian */
			u32 clock_low;
			u32 clock_high;
		};
	};
	u64 reserved[7];
};

static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode __ro_after_init;

unsigned long vmware_hypercall_slow(unsigned long cmd,
				    unsigned long in1, unsigned long in3,
				    unsigned long in4, unsigned long in5,
				    u32 *out1, u32 *out2, u32 *out3,
				    u32 *out4, u32 *out5)
{
	unsigned long out0, rbx, rcx, rdx, rsi, rdi;

	switch (vmware_hypercall_mode) {
	case CPUID_VMWARE_FEATURES_ECX_VMCALL:
		asm_inline volatile ("vmcall"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:
		asm_inline volatile ("vmmcall"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	default:
		asm_inline volatile ("movw %[port], %%dx; inl (%%dx), %%eax"
				: "=a" (out0), "=b" (rbx), "=c" (rcx),
				  "=d" (rdx), "=S" (rsi), "=D" (rdi)
				: [port] "i" (VMWARE_HYPERVISOR_PORT),
				  "a" (VMWARE_HYPERVISOR_MAGIC),
				  "b" (in1),
				  "c" (cmd),
				  "d" (in3),
				  "S" (in4),
				  "D" (in5)
				: "cc", "memory");
		break;
	}

	if (out1)
		*out1 = rbx;
	if (out2)
		*out2 = rcx;
	if (out3)
		*out3 = rdx;
	if (out4)
		*out4 = rsi;
	if (out5)
		*out5 = rdi;

	return out0;
}
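
/*
 * All three mechanisms above share one register ABI: the VMware magic in
 * %eax, the command in %ecx, up to four extra inputs in %ebx, %edx, %esi
 * and %edi, with results returned in the same six registers. VMCALL is
 * used on Intel VT, VMMCALL on AMD SVM, and the "inl" on the backdoor
 * I/O port is the legacy fallback that predates hardware-assisted
 * virtualization. Callers normally go through the vmware_hypercallN()
 * wrappers declared in asm/vmware.h, e.g. (as in __vmware_platform()
 * below):
 *
 *	u32 eax, ebx, ecx;
 *
 *	eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
 */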

static inline int __vmware_platform(void)
{
	u32 eax, ebx, ecx;

	eax = vmware_hypercall3(VMWARE_CMD_GETVERSION, 0, &ebx, &ecx);
	return eax != UINT_MAX && ebx == VMWARE_HYPERVISOR_MAGIC;
}

static unsigned long vmware_get_tsc_khz(void)
{
	return vmware_tsc_khz;
}

#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static bool vmw_sched_clock __initdata = true;
static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
static bool has_steal_clock;
static bool steal_acc __initdata = true; /* steal time accounting */

static __init int setup_vmw_sched_clock(char *s)
{
	vmw_sched_clock = false;
	return 0;
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);

static __init int parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static noinstr u64 vmware_sched_clock(void)
{
	unsigned long long ns;

	ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
			     vmware_cyc2ns.cyc2ns_shift);
	ns -= vmware_cyc2ns.cyc2ns_offset;
	return ns;
}

static void __init vmware_cyc2ns_setup(void)
{
	struct cyc2ns_data *d = &vmware_cyc2ns;
	unsigned long long tsc_now = rdtsc();

	clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
			       vmware_tsc_khz, NSEC_PER_MSEC, 0);
	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
					   d->cyc2ns_shift);

	pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}
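
/*
 * Worked example for the mult/shift pair computed above (numbers are
 * illustrative, not from a real host): with vmware_tsc_khz = 2000000
 * (a 2 GHz TSC) one cycle lasts 0.5 ns, so clocks_calc_mult_shift()
 * picks cyc2ns_mul / 2^cyc2ns_shift ~= 0.5, e.g. mul = 0x80000000 with
 * shift = 32. vmware_sched_clock() then evaluates
 *
 *	ns = ((rdtsc() * cyc2ns_mul) >> cyc2ns_shift) - cyc2ns_offset
 *
 * where the offset, sampled here at setup time, makes the scheduler
 * clock start near zero instead of at the raw TSC value.
 */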

static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{
	u32 info;

	return vmware_hypercall5(VMWARE_CMD_STEALCLOCK, 0, 0, addr_hi, addr_lo,
				 &info);
}

static bool stealclock_enable(phys_addr_t pa)
{
	return vmware_cmd_stealclock(upper_32_bits(pa),
				     lower_32_bits(pa)) == STEALCLOCK_ENABLED;
}

static int __stealclock_disable(void)
{
	return vmware_cmd_stealclock(0, 1);
}

static void stealclock_disable(void)
{
	__stealclock_disable();
}

static bool vmware_is_stealclock_available(void)
{
	return __stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
}

/**
 * vmware_steal_clock() - read the per-cpu steal clock
 * @cpu: the cpu number whose steal clock we want to read
 *
 * The function reads the steal clock if we are on a 64-bit system, otherwise
 * reads it in parts, checking that the high part didn't change in the
 * meantime.
 *
 * Return:
 *	The steal clock reading in ns.
 */
static u64 vmware_steal_clock(int cpu)
{
	struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
	u64 clock;

	if (IS_ENABLED(CONFIG_64BIT))
		clock = READ_ONCE(steal->clock);
	else {
		u32 initial_high, low, high;

		do {
			initial_high = READ_ONCE(steal->clock_high);
			/* Do not reorder initial_high and high readings */
			virt_rmb();
			low = READ_ONCE(steal->clock_low);
			/* Keep low reading in between */
			virt_rmb();
			high = READ_ONCE(steal->clock_high);
		} while (initial_high != high);

		clock = ((u64)high << 32) | low;
	}

	return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
			       vmware_cyc2ns.cyc2ns_shift);
}
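
/*
 * The 32-bit branch above is a seqcount-style retry loop: a 64-bit load
 * is not atomic on 32-bit kernels, so clock_high is read on both sides
 * of clock_low to detect the hypervisor advancing the counter mid-read.
 * For example, if the counter moves from 0x00000000ffffffff to
 * 0x0000000100000000 between the two halves, a naive pair of loads
 * could yield 0x00000001ffffffff; here initial_high != high forces a
 * re-read instead.
 */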

static void vmware_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);

	if (!has_steal_clock)
		return;

	if (!stealclock_enable(slow_virt_to_phys(st))) {
		has_steal_clock = false;
		return;
	}

	pr_info("vmware-stealtime: cpu %d, pa %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static void vmware_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	stealclock_disable();
}

static void vmware_guest_cpu_init(void)
{
	if (has_steal_clock)
		vmware_register_steal_time();
}

static void vmware_pv_guest_cpu_reboot(void *unused)
{
	vmware_disable_steal_time();
}

static int vmware_pv_reboot_notify(struct notifier_block *nb,
				   unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block vmware_pv_reboot_nb = {
	.notifier_call = vmware_pv_reboot_notify,
};

#ifdef CONFIG_SMP
static void __init vmware_smp_prepare_boot_cpu(void)
{
	vmware_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static int vmware_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	vmware_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int vmware_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	vmware_disable_steal_time();
	local_irq_enable();
	return 0;
}
#endif

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static void __init vmware_paravirt_ops_setup(void)
{
	pv_info.name = "VMware hypervisor";
	pv_ops.cpu.io_delay = paravirt_nop;

	if (vmware_tsc_khz == 0)
		return;

	vmware_cyc2ns_setup();

	if (vmw_sched_clock)
		paravirt_set_sched_clock(vmware_sched_clock);

	if (vmware_is_stealclock_available()) {
		has_steal_clock = true;
		static_call_update(pv_steal_clock, vmware_steal_clock);

		/* We use reboot notifier only to disable steal clock */
		register_reboot_notifier(&vmware_pv_reboot_nb);

#ifdef CONFIG_SMP
		smp_ops.smp_prepare_boot_cpu =
			vmware_smp_prepare_boot_cpu;
		if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					      "x86/vmware:online",
					      vmware_cpu_online,
					      vmware_cpu_down_prepare) < 0)
			pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
#else
		vmware_guest_cpu_init();
#endif
	}
}
#else
#define vmware_paravirt_ops_setup() do {} while (0)
#endif

/*
 * VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing difference when running on virtual cpus, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between vcpus' TSCs (though the
 * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
 * is not suitable as a watchdog when running on a hypervisor because the
 * kernel may miss a wrap of the counter if the vcpu is descheduled for a
 * long time. To skip these checks at runtime we set these capability bits,
 * so that the kernel could just trust the hypervisor with providing a
 * reliable virtual TSC that is suitable for timekeeping.
 */
static void __init vmware_set_capabilities(void)
{
	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	if (vmware_tsc_khz)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMCALL);
	else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
		setup_force_cpu_cap(X86_FEATURE_VMW_VMMCALL);
}

static void __init vmware_platform_setup(void)
{
	u32 eax, ebx, ecx;
	u64 lpj, tsc_khz;

	eax = vmware_hypercall3(VMWARE_CMD_GETHZ, UINT_MAX, &ebx, &ecx);

	if (ebx != UINT_MAX) {
		lpj = tsc_khz = eax | (((u64)ebx) << 32);
		do_div(tsc_khz, 1000);
		WARN_ON(tsc_khz >> 32);
		pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
			(unsigned long) tsc_khz / 1000,
			(unsigned long) tsc_khz % 1000);

		if (!preset_lpj) {
			do_div(lpj, HZ);
			preset_lpj = lpj;
		}

		vmware_tsc_khz = tsc_khz;
		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
		x86_platform.calibrate_cpu = vmware_get_tsc_khz;

#ifdef CONFIG_X86_LOCAL_APIC
		/* Skip lapic calibration since we know the bus frequency. */
		lapic_timer_period = ecx / HZ;
		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
			ecx);
#endif
	} else {
		pr_warn("Failed to get TSC freq from the hypervisor\n");
	}

	vmware_paravirt_ops_setup();

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

	vmware_set_capabilities();
}
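
/*
 * Worked example for the GETHZ math above (numbers are illustrative):
 * for a 2.5 GHz TSC the hypercall returns eax | ebx << 32 =
 * 2500000000 Hz, tsc_khz becomes 2500000 after the do_div() and the
 * log line reads "TSC freq read from hypervisor : 2500.000 MHz".
 * lpj keeps the raw Hz value and is divided by HZ instead, e.g.
 * 2500000000 / 250 = 10000000 loops per jiffy with CONFIG_HZ=250.
 */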

static u8 __init vmware_select_hypercall(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx);
	return (ecx & (CPUID_VMWARE_FEATURES_ECX_VMMCALL |
		       CPUID_VMWARE_FEATURES_ECX_VMCALL));
}

/*
 * While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
 * intentionally defaults to 0.
 */
static u32 __init vmware_platform(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) {
			if (eax >= CPUID_VMWARE_FEATURES_LEAF)
				vmware_hypercall_mode =
					vmware_select_hypercall();

			pr_info("hypercall mode: 0x%02x\n",
				(unsigned int) vmware_hypercall_mode);

			return CPUID_VMWARE_INFO_LEAF;
		}
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform())
		return 1;

	return 0;
}

/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
	u32 eax;

	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
	return !(eax & GETVCPU_INFO_VCPU_RESERVED) &&
		(eax & GETVCPU_INFO_LEGACY_X2APIC);
}

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * TDCALL[TDG.VP.VMCALL] uses %rax (arg0) and %rcx (arg2). Therefore,
 * we remap those registers to %r12 and %r13, respectively.
 */
unsigned long vmware_tdx_hypercall(unsigned long cmd,
				   unsigned long in1, unsigned long in3,
				   unsigned long in4, unsigned long in5,
				   u32 *out1, u32 *out2, u32 *out3,
				   u32 *out4, u32 *out5)
{
	struct tdx_module_args args = {};

	if (!hypervisor_is_type(X86_HYPER_VMWARE)) {
		pr_warn_once("Incorrect usage\n");
		return ULONG_MAX;
	}

	if (cmd & ~VMWARE_CMD_MASK) {
		pr_warn_once("Out of range command %lx\n", cmd);
		return ULONG_MAX;
	}

	args.rbx = in1;
	args.rdx = in3;
	args.rsi = in4;
	args.rdi = in5;
	args.r10 = VMWARE_TDX_VENDOR_LEAF;
	args.r11 = VMWARE_TDX_HCALL_FUNC;
	args.r12 = VMWARE_HYPERVISOR_MAGIC;
	args.r13 = cmd;
	/* CPL */
	args.r15 = 0;

	__tdx_hypercall(&args);

	if (out1)
		*out1 = args.rbx;
	if (out2)
		*out2 = args.r13;
	if (out3)
		*out3 = args.rdx;
	if (out4)
		*out4 = args.rsi;
	if (out5)
		*out5 = args.rdi;

	return args.r12;
}
EXPORT_SYMBOL_GPL(vmware_tdx_hypercall);
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
					struct pt_regs *regs)
{
	/* Copy VMWARE specific Hypercall parameters to the GHCB */
	ghcb_set_rip(ghcb, regs->ip);
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
	ghcb_set_rdi(ghcb, regs->di);
	ghcb_set_rbp(ghcb, regs->bp);
}

static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	if (!(ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb) &&
	      ghcb_rsi_is_valid(ghcb) &&
	      ghcb_rdi_is_valid(ghcb) &&
	      ghcb_rbp_is_valid(ghcb)))
		return false;

	regs->bx = ghcb_get_rbx(ghcb);
	regs->cx = ghcb_get_rcx(ghcb);
	regs->dx = ghcb_get_rdx(ghcb);
	regs->si = ghcb_get_rsi(ghcb);
	regs->di = ghcb_get_rdi(ghcb);
	regs->bp = ghcb_get_rbp(ghcb);

	return true;
}
#endif
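
/*
 * Under SEV-ES the hypervisor cannot read guest registers directly, so
 * the two hooks above bracket the VMMCALL-based hypercall in the #VC
 * handler: _prepare copies the hypercall arguments into the shared GHCB
 * page and _finish checks that the hypervisor marked every expected
 * result register valid before copying them back into pt_regs. A false
 * return from _finish is treated as a protocol violation by the #VC
 * handler rather than consuming stale register state.
 */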

const __initconst struct hypervisor_x86 x86_hyper_vmware = {
	.name				= "VMware",
	.detect				= vmware_platform,
	.type				= X86_HYPER_VMWARE,
	.init.init_platform		= vmware_platform_setup,
	.init.x2apic_available		= vmware_legacy_x2apic_available,
#ifdef CONFIG_AMD_MEM_ENCRYPT
	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
#endif
};
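
/*
 * x86_hyper_vmware is referenced from the hypervisors[] table in
 * arch/x86/kernel/cpu/hypervisor.c. Each ->detect() callback runs
 * early in boot and the entry returning the highest value wins, which
 * is why vmware_platform() reports the CPUID leaf (0x40000000) for a
 * modern VMware hypervisor and only 1 for the legacy DMI-based match.
 */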