/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static unsigned tsc_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC",			/* name */
	800,			/* quality (adjusted in code) */
};
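/*
 * VMware "backdoor" call: an inl from port 0x5658 ("VX") with the magic
 * 0x564d5868 ("VMXh") in %eax and a command number in %ecx returns its
 * results in %eax-%edx.  GETVERSION is used below to confirm we really
 * are on VMware; GETHZ reports the TSC frequency directly.
 */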
#define	VMW_HVMAGIC		0x564d5868
#define	VMW_HVPORT		0x5658
#define	VMW_HVCMD_GETVERSION	10
#define	VMW_HVCMD_GETHZ		45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

	__asm __volatile("inl %w3, %0"
	: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	: "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
	: "memory");
}

static int
tsc_freq_vmware(void)
{
	char hv_sig[13];
	u_int regs[4];
	char *p;
	u_int hv_high;
	int i;

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	hv_high = 0;
	if ((cpu_feature2 & CPUID2_HV) != 0) {
		do_cpuid(0x40000000, regs);
		hv_high = regs[0];
		for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
			memcpy(p, &regs[i], sizeof(regs[i]));
		*p = '\0';
		if (bootverbose) {
			/*
			 * HV vendor	ID string
			 * ------------+--------------
			 * KVM		"KVMKVMKVM"
			 * Microsoft	"Microsoft Hv"
			 * VMware	"VMwareVMware"
			 * Xen		"XenVMMXenVMM"
			 */
			printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
		}
		if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
			return (0);
	} else {
		p = getenv("smbios.system.serial");
		if (p == NULL)
			return (0);
		if (strncmp(p, "VMware-", 7) != 0 &&
		    strncmp(p, "VMW", 3) != 0) {
			freeenv(p);
			return (0);
		}
		freeenv(p);
		vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
		if (regs[1] != VMW_HVMAGIC)
			return (0);
	}
	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
	return (1);
}

static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}
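/*
 * Probe the TSC: check whether the APERF/MPERF statistics MSRs really
 * work, let a hypervisor (VMware) report the frequency directly, flag
 * per-vendor invariant TSCs, and finally, unless
 * machdep.disable_tsc_calibration is set, calibrate against a
 * one-second DELAY().
 */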
static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (tsc_freq_vmware())
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}
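/*
 * SMP TSC synchronization test.  Each smp_rendezvous() below makes every
 * CPU record three back-to-back TSC samples.  comp_smp_tsc() then checks,
 * for each pair of CPUs, that a CPU's second sample strictly follows the
 * other CPU's first and its third strictly follows the other's second;
 * any non-positive difference across the N rounds means the TSCs are
 * offset or drifting relative to each other and the test fails.
 */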
#ifdef SMP

#define	TSC_READ(x)			\
static void				\
tsc_read_##x(void *arg)			\
{					\
	uint32_t *tsc = arg;		\
	u_int cpu = PCPU_GET(cpuid);	\
					\
	tsc[cpu * 3 + x] = rdtsc32();	\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint32_t *tsc;
	int32_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static int
test_smp_tsc(void)
{
	uint32_t *data, *tsc;
	u_int i, size;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test\n",
		    smp_tsc ? "pass" : "fail");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * We cannot use the TSC if it stops incrementing in deep sleep.
	 * Currently only Intel CPUs are known to have this problem unless
	 * the invariant TSC bit is set.
	 */
	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_quality = -1000;
		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
		if (bootverbose)
			printf("TSC timecounter disabled: C3 enabled.\n");
		goto init;
	}

#ifdef SMP
	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  We also limit the frequency even lower to avoid
	 * "temporal anomalies" as much as possible.  The TSC seems
	 * unreliable in virtualized SMP environments, so it is set to a
	 * negative quality in those cases.
	 */
	if (smp_cpus > 1) {
		if (vm_guest != 0) {
			tsc_timecounter.tc_quality = -100;
		} else {
			tsc_timecounter.tc_quality = test_smp_tsc();
			max_freq >>= 8;
		}
	} else
#endif
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;

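/*
 * Find the smallest shift with (tsc_freq >> shift) <= max_freq.  A
 * non-zero shift switches the timecounter to "TSC-low", which discards
 * that many low-order bits; the shift itself is stashed in tc_priv for
 * tsc_get_timecount_low() and the vdso to apply.
 */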
init:
	for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if (shift > 0) {
		tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

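/*
 * "TSC-low" read routine: shrd shifts the 64-bit %edx:%eax TSC value
 * right by %cl (the shift saved in tc_priv) and leaves the low 32 bits
 * of the result in %eax, so the discarded bits never reach the
 * timecounter.
 */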
static u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

	vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

	vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (timecounter == &tsc_timecounter);
}
#endif