/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static unsigned tsc_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
91 "TSC", /* name */ 92 800, /* quality (adjusted in code) */ 93 }; 94 95 #define VMW_HVMAGIC 0x564d5868 96 #define VMW_HVPORT 0x5658 97 #define VMW_HVCMD_GETVERSION 10 98 #define VMW_HVCMD_GETHZ 45 99 100 static __inline void 101 vmware_hvcall(u_int cmd, u_int *p) 102 { 103 104 __asm __volatile("inl %w3, %0" 105 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3]) 106 : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT) 107 : "memory"); 108 } 109 110 static int 111 tsc_freq_vmware(void) 112 { 113 char hv_sig[13]; 114 u_int regs[4]; 115 char *p; 116 u_int hv_high; 117 int i; 118 119 /* 120 * [RFC] CPUID usage for interaction between Hypervisors and Linux. 121 * http://lkml.org/lkml/2008/10/1/246 122 * 123 * KB1009458: Mechanisms to determine if software is running in 124 * a VMware virtual machine 125 * http://kb.vmware.com/kb/1009458 126 */ 127 hv_high = 0; 128 if ((cpu_feature2 & CPUID2_HV) != 0) { 129 do_cpuid(0x40000000, regs); 130 hv_high = regs[0]; 131 for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4) 132 memcpy(p, ®s[i], sizeof(regs[i])); 133 *p = '\0'; 134 if (bootverbose) { 135 /* 136 * HV vendor ID string 137 * ------------+-------------- 138 * KVM "KVMKVMKVM" 139 * Microsoft "Microsoft Hv" 140 * VMware "VMwareVMware" 141 * Xen "XenVMMXenVMM" 142 */ 143 printf("Hypervisor: Origin = \"%s\"\n", hv_sig); 144 } 145 if (strncmp(hv_sig, "VMwareVMware", 12) != 0) 146 return (0); 147 } else { 148 p = getenv("smbios.system.serial"); 149 if (p == NULL) 150 return (0); 151 if (strncmp(p, "VMware-", 7) != 0 && 152 strncmp(p, "VMW", 3) != 0) { 153 freeenv(p); 154 return (0); 155 } 156 freeenv(p); 157 vmware_hvcall(VMW_HVCMD_GETVERSION, regs); 158 if (regs[1] != VMW_HVMAGIC) 159 return (0); 160 } 161 if (hv_high >= 0x40000010) { 162 do_cpuid(0x40000010, regs); 163 tsc_freq = regs[0] * 1000; 164 } else { 165 vmware_hvcall(VMW_HVCMD_GETHZ, regs); 166 if (regs[1] != UINT_MAX) 167 tsc_freq = regs[0] | ((uint64_t)regs[1] << 32); 168 } 169 tsc_is_invariant = 1; 170 return (1); 171 } 172 173 static void 174 tsc_freq_intel(void) 175 { 176 char brand[48]; 177 u_int regs[4]; 178 uint64_t freq; 179 char *p; 180 u_int i; 181 182 /* 183 * Intel Processor Identification and the CPUID Instruction 184 * Application Note 485. 185 * http://www.intel.com/assets/pdf/appnote/241618.pdf 186 */ 187 if (cpu_exthigh >= 0x80000004) { 188 p = brand; 189 for (i = 0x80000002; i < 0x80000005; i++) { 190 do_cpuid(i, regs); 191 memcpy(p, regs, sizeof(regs)); 192 p += sizeof(regs); 193 } 194 p = NULL; 195 for (i = 0; i < sizeof(brand) - 1; i++) 196 if (brand[i] == 'H' && brand[i + 1] == 'z') 197 p = brand + i; 198 if (p != NULL) { 199 p -= 5; 200 switch (p[4]) { 201 case 'M': 202 i = 1; 203 break; 204 case 'G': 205 i = 1000; 206 break; 207 case 'T': 208 i = 1000000; 209 break; 210 default: 211 return; 212 } 213 #define C2D(c) ((c) - '0') 214 if (p[1] == '.') { 215 freq = C2D(p[0]) * 1000; 216 freq += C2D(p[2]) * 100; 217 freq += C2D(p[3]) * 10; 218 freq *= i * 1000; 219 } else { 220 freq = C2D(p[0]) * 1000; 221 freq += C2D(p[1]) * 100; 222 freq += C2D(p[2]) * 10; 223 freq += C2D(p[3]); 224 freq *= i * 1000000; 225 } 226 #undef C2D 227 tsc_freq = freq; 228 } 229 } 230 } 231 232 static void 233 probe_tsc_freq(void) 234 { 235 u_int regs[4]; 236 uint64_t tsc1, tsc2; 237 238 if (cpu_high >= 6) { 239 do_cpuid(6, regs); 240 if ((regs[2] & CPUID_PERF_STAT) != 0) { 241 /* 242 * XXX Some emulators expose host CPUID without actual 243 * support for these MSRs. We must test whether they 244 * really work. 
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (tsc_freq_vmware())
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

#define	TSC_READ(x)			\
static void				\
tsc_read_##x(void *arg)			\
{					\
	uint32_t *tsc = arg;		\
	u_int cpu = PCPU_GET(cpuid);	\
					\
	tsc[cpu * 3 + x] = rdtsc32();	\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint32_t *tsc;
	int32_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static int
test_smp_tsc(void)
{
	uint32_t *data, *tsc;
	u_int i, size;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test\n",
"pass" : "fail"); 386 if (smp_tsc && tsc_is_invariant) { 387 switch (cpu_vendor_id) { 388 case CPU_VENDOR_AMD: 389 /* 390 * Starting with Family 15h processors, TSC clock 391 * source is in the north bridge. Check whether 392 * we have a single-socket/multi-core platform. 393 * XXX Need more work for complex cases. 394 */ 395 if (CPUID_TO_FAMILY(cpu_id) < 0x15 || 396 (amd_feature2 & AMDID2_CMP) == 0 || 397 smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1) 398 break; 399 return (1000); 400 case CPU_VENDOR_INTEL: 401 /* 402 * XXX Assume Intel platforms have synchronized TSCs. 403 */ 404 return (1000); 405 } 406 return (800); 407 } 408 return (-100); 409 } 410 411 #undef N 412 413 #endif /* SMP */ 414 415 static void 416 init_TSC_tc(void) 417 { 418 uint64_t max_freq; 419 int shift; 420 421 if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 422 return; 423 424 /* 425 * Limit timecounter frequency to fit in an int and prevent it from 426 * overflowing too fast. 427 */ 428 max_freq = UINT_MAX; 429 430 /* 431 * We can not use the TSC if we support APM. Precise timekeeping 432 * on an APM'ed machine is at best a fools pursuit, since 433 * any and all of the time spent in various SMM code can't 434 * be reliably accounted for. Reading the RTC is your only 435 * source of reliable time info. The i8254 loses too, of course, 436 * but we need to have some kind of time... 437 * We don't know at this point whether APM is going to be used 438 * or not, nor when it might be activated. Play it safe. 439 */ 440 if (power_pm_get_type() == POWER_PM_TYPE_APM) { 441 tsc_timecounter.tc_quality = -1000; 442 if (bootverbose) 443 printf("TSC timecounter disabled: APM enabled.\n"); 444 goto init; 445 } 446 447 /* 448 * We cannot use the TSC if it stops incrementing in deep sleep. 449 * Currently only Intel CPUs are known for this problem unless 450 * the invariant TSC bit is set. 451 */ 452 if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL && 453 (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) { 454 tsc_timecounter.tc_quality = -1000; 455 tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP; 456 if (bootverbose) 457 printf("TSC timecounter disabled: C3 enabled.\n"); 458 goto init; 459 } 460 461 #ifdef SMP 462 /* 463 * We can not use the TSC in SMP mode unless the TSCs on all CPUs are 464 * synchronized. If the user is sure that the system has synchronized 465 * TSCs, set kern.timecounter.smp_tsc tunable to a non-zero value. 466 * We also limit the frequency even lower to avoid "temporal anomalies" 467 * as much as possible. The TSC seems unreliable in virtualized SMP 468 * environments, so it is set to a negative quality in those cases. 
	 */
	if (smp_cpus > 1) {
		if (vm_guest != 0) {
			tsc_timecounter.tc_quality = -100;
		} else {
			tsc_timecounter.tc_quality = test_smp_tsc();
			max_freq >>= 8;
		}
	} else
#endif
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;

init:
	for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if (shift > 0) {
		tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/*
	 * Total setting for this level gives the new frequency in MHz.
	 */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}
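
/*
 * For illustration only (a sketch, not compiled into the kernel): the
 * "rdtsc; shrd" sequence above is equivalent to reading the full 64-bit
 * counter and shifting it right by the number of discarded bits stored
 * in tc_priv.  SHRD shifts the %edx:%eax pair right by %cl bits (the
 * shift is kept below 31 by init_TSC_tc()), leaving the low 32 bits of
 * the result in %eax:
 *
 *	static u_int
 *	tsc_get_timecount_low_c(struct timecounter *tc)
 *	{
 *
 *		return ((u_int)(rdtsc() >> (int)(intptr_t)tc->tc_priv));
 *	}
 *
 * The hypothetical tsc_get_timecount_low_c() trades the one-instruction
 * shift for a 64-bit shift in C, which the assembler version avoids on
 * 32-bit machines.
 */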