1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 1998-2003 Poul-Henning Kamp 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 #include "opt_clock.h" 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/cpu.h> 36 #include <sys/eventhandler.h> 37 #include <sys/limits.h> 38 #include <sys/malloc.h> 39 #include <sys/proc.h> 40 #include <sys/sched.h> 41 #include <sys/sysctl.h> 42 #include <sys/time.h> 43 #include <sys/timetc.h> 44 #include <sys/kernel.h> 45 #include <sys/smp.h> 46 #include <sys/vdso.h> 47 #include <machine/clock.h> 48 #include <machine/cputypes.h> 49 #include <machine/fpu.h> 50 #include <machine/md_var.h> 51 #include <machine/specialreg.h> 52 #include <x86/vmware.h> 53 #include <dev/acpica/acpi_hpet.h> 54 #include <contrib/dev/acpica/include/acpi.h> 55 56 #include "cpufreq_if.h" 57 58 uint64_t tsc_freq; 59 int tsc_is_invariant; 60 int tsc_perf_stat; 61 static int tsc_early_calib_exact; 62 63 static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag; 64 65 SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN, 66 &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant"); 67 68 #ifdef SMP 69 int smp_tsc; 70 SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0, 71 "Indicates whether the TSC is safe to use in SMP mode"); 72 73 int smp_tsc_adjust = 0; 74 SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN, 75 &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP"); 76 #endif 77 78 static int tsc_shift = 1; 79 SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN, 80 &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency"); 81 82 static int tsc_disabled; 83 SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0, 84 "Disable x86 Time Stamp Counter"); 85 86 static int tsc_skip_calibration; 87 SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN, 88 &tsc_skip_calibration, 0, 89 "Disable early TSC frequency calibration"); 90 91 static void tsc_freq_changed(void *arg, const struct cf_level *level, 92 int status); 93 static void tsc_freq_changing(void *arg, const struct cf_level *level, 94 int *status); 95 static u_int tsc_get_timecount(struct timecounter *tc); 96 static inline u_int tsc_get_timecount_low(struct timecounter *tc); 97 static u_int tsc_get_timecount_lfence(struct timecounter *tc); 98 static u_int tsc_get_timecount_low_lfence(struct timecounter *tc); 99 static u_int tsc_get_timecount_mfence(struct timecounter *tc); 100 static u_int tsc_get_timecount_low_mfence(struct timecounter *tc); 101 static u_int tscp_get_timecount(struct timecounter *tc); 102 static u_int tscp_get_timecount_low(struct timecounter *tc); 103 static void tsc_levels_changed(void *arg, int unit); 104 static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, 105 struct timecounter *tc); 106 #ifdef COMPAT_FREEBSD32 107 static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 108 struct timecounter *tc); 109 #endif 110 111 static struct timecounter tsc_timecounter = { 112 .tc_get_timecount = tsc_get_timecount, 113 .tc_counter_mask = ~0u, 114 .tc_name = "TSC", 115 .tc_quality = 800, /* adjusted in code */ 116 .tc_fill_vdso_timehands = x86_tsc_vdso_timehands, 117 #ifdef COMPAT_FREEBSD32 118 .tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32, 119 #endif 120 }; 121 122 static int 123 tsc_freq_cpuid_vm(void) 124 { 125 u_int regs[4]; 126 127 if (vm_guest == VM_GUEST_NO) 128 return (false); 129 if (hv_high < 0x40000010) 130 return (false); 131 do_cpuid(0x40000010, regs); 132 tsc_freq = (uint64_t)(regs[0]) * 1000; 133 tsc_early_calib_exact = 1; 134 return (true); 135 } 136 137 static void 138 tsc_freq_vmware(void) 139 { 140 u_int regs[4]; 141 142 vmware_hvcall(0, VMW_HVCMD_GETHZ, VMW_HVCMD_DEFAULT_PARAM, regs); 143 if (regs[1] != UINT_MAX) 144 tsc_freq = regs[0] | ((uint64_t)regs[1] << 32); 145 tsc_early_calib_exact = 1; 146 } 147 148 static void 149 tsc_freq_xen(void) 150 { 151 u_int regs[4]; 152 153 /* 154 * Must run *after* generic tsc_freq_cpuid_vm, so that when Xen is 155 * emulating Viridian support the Viridian leaf is used instead. 156 */ 157 KASSERT(hv_high >= 0x40000003, ("Invalid max hypervisor leaf on Xen")); 158 cpuid_count(0x40000003, 0, regs); 159 tsc_freq = (uint64_t)(regs[2]) * 1000; 160 tsc_early_calib_exact = 1; 161 } 162 163 /* 164 * Calculate TSC frequency using information from the CPUID leaf 0x15 'Time 165 * Stamp Counter and Nominal Core Crystal Clock'. If leaf 0x15 is not 166 * functional, as it is on Skylake/Kabylake, try 0x16 'Processor Frequency 167 * Information'. Leaf 0x16 is described in the SDM as informational only, but 168 * we can use this value until late calibration is complete. 169 */ 170 static bool 171 tsc_freq_cpuid(uint64_t *res) 172 { 173 u_int regs[4]; 174 175 if (cpu_high < 0x15) 176 return (false); 177 do_cpuid(0x15, regs); 178 if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) { 179 *res = (uint64_t)regs[2] * regs[1] / regs[0]; 180 return (true); 181 } 182 183 if (cpu_high < 0x16) 184 return (false); 185 do_cpuid(0x16, regs); 186 if (regs[0] != 0) { 187 *res = (uint64_t)regs[0] * 1000000; 188 return (true); 189 } 190 191 return (false); 192 } 193 194 static bool 195 tsc_freq_intel_brand(uint64_t *res) 196 { 197 char brand[48]; 198 u_int regs[4]; 199 uint64_t freq; 200 char *p; 201 u_int i; 202 203 /* 204 * Intel Processor Identification and the CPUID Instruction 205 * Application Note 485. 206 * http://www.intel.com/assets/pdf/appnote/241618.pdf 207 */ 208 if (cpu_exthigh >= 0x80000004) { 209 p = brand; 210 for (i = 0x80000002; i < 0x80000005; i++) { 211 do_cpuid(i, regs); 212 memcpy(p, regs, sizeof(regs)); 213 p += sizeof(regs); 214 } 215 p = NULL; 216 for (i = 0; i < sizeof(brand) - 1; i++) 217 if (brand[i] == 'H' && brand[i + 1] == 'z') 218 p = brand + i; 219 if (p != NULL) { 220 p -= 5; 221 switch (p[4]) { 222 case 'M': 223 i = 1; 224 break; 225 case 'G': 226 i = 1000; 227 break; 228 case 'T': 229 i = 1000000; 230 break; 231 default: 232 return (false); 233 } 234 #define C2D(c) ((c) - '0') 235 if (p[1] == '.') { 236 freq = C2D(p[0]) * 1000; 237 freq += C2D(p[2]) * 100; 238 freq += C2D(p[3]) * 10; 239 freq *= i * 1000; 240 } else { 241 freq = C2D(p[0]) * 1000; 242 freq += C2D(p[1]) * 100; 243 freq += C2D(p[2]) * 10; 244 freq += C2D(p[3]); 245 freq *= i * 1000000; 246 } 247 #undef C2D 248 *res = freq; 249 return (true); 250 } 251 } 252 return (false); 253 } 254 255 static void 256 tsc_freq_tc(uint64_t *res) 257 { 258 uint64_t tsc1, tsc2; 259 int64_t overhead; 260 int count, i; 261 262 overhead = 0; 263 for (i = 0, count = 8; i < count; i++) { 264 tsc1 = rdtsc_ordered(); 265 DELAY(0); 266 tsc2 = rdtsc_ordered(); 267 if (i > 0) 268 overhead += tsc2 - tsc1; 269 } 270 overhead /= count; 271 272 tsc1 = rdtsc_ordered(); 273 DELAY(100000); 274 tsc2 = rdtsc_ordered(); 275 tsc_freq = (tsc2 - tsc1 - overhead) * 10; 276 } 277 278 /* 279 * Try to determine the TSC frequency using CPUID or hypercalls. If successful, 280 * this lets use the TSC for early DELAY() calls instead of the 8254 timer, 281 * which may be unreliable or entirely absent on contemporary systems. However, 282 * avoid calibrating using the 8254 here so as to give hypervisors a chance to 283 * register a timecounter that can be used instead. 284 */ 285 static void 286 probe_tsc_freq_early(void) 287 { 288 #ifdef __i386__ 289 /* The TSC is known to be broken on certain CPUs. */ 290 switch (cpu_vendor_id) { 291 case CPU_VENDOR_AMD: 292 switch (cpu_id & 0xFF0) { 293 case 0x500: 294 /* K5 Model 0 */ 295 tsc_disabled = 1; 296 return; 297 } 298 break; 299 case CPU_VENDOR_CENTAUR: 300 switch (cpu_id & 0xff0) { 301 case 0x540: 302 /* 303 * http://www.centtech.com/c6_data_sheet.pdf 304 * 305 * I-12 RDTSC may return incoherent values in EDX:EAX 306 * I-13 RDTSC hangs when certain event counters are used 307 */ 308 tsc_disabled = 1; 309 return; 310 } 311 break; 312 case CPU_VENDOR_NSC: 313 switch (cpu_id & 0xff0) { 314 case 0x540: 315 if ((cpu_id & CPUID_STEPPING) == 0) { 316 tsc_disabled = 1; 317 return; 318 } 319 break; 320 } 321 break; 322 } 323 #endif 324 325 switch (cpu_vendor_id) { 326 case CPU_VENDOR_AMD: 327 case CPU_VENDOR_HYGON: 328 if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 329 (vm_guest == VM_GUEST_NO && 330 CPUID_TO_FAMILY(cpu_id) >= 0x10)) 331 tsc_is_invariant = 1; 332 if (cpu_feature & CPUID_SSE2) { 333 tsc_timecounter.tc_get_timecount = 334 tsc_get_timecount_mfence; 335 } 336 break; 337 case CPU_VENDOR_INTEL: 338 if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 339 (vm_guest == VM_GUEST_NO && 340 ((CPUID_TO_FAMILY(cpu_id) == 0x6 && 341 CPUID_TO_MODEL(cpu_id) >= 0xe) || 342 (CPUID_TO_FAMILY(cpu_id) == 0xf && 343 CPUID_TO_MODEL(cpu_id) >= 0x3)))) 344 tsc_is_invariant = 1; 345 if (cpu_feature & CPUID_SSE2) { 346 tsc_timecounter.tc_get_timecount = 347 tsc_get_timecount_lfence; 348 } 349 break; 350 case CPU_VENDOR_CENTAUR: 351 if (vm_guest == VM_GUEST_NO && 352 CPUID_TO_FAMILY(cpu_id) == 0x6 && 353 CPUID_TO_MODEL(cpu_id) >= 0xf && 354 (rdmsr(0x1203) & 0x100000000ULL) == 0) 355 tsc_is_invariant = 1; 356 if (cpu_feature & CPUID_SSE2) { 357 tsc_timecounter.tc_get_timecount = 358 tsc_get_timecount_lfence; 359 } 360 break; 361 } 362 363 if (tsc_freq_cpuid_vm()) { 364 if (bootverbose) 365 printf( 366 "Early TSC frequency %juHz derived from hypervisor CPUID\n", 367 (uintmax_t)tsc_freq); 368 } else if (vm_guest == VM_GUEST_VMWARE) { 369 tsc_freq_vmware(); 370 if (bootverbose) 371 printf( 372 "Early TSC frequency %juHz derived from VMWare hypercall\n", 373 (uintmax_t)tsc_freq); 374 } else if (vm_guest == VM_GUEST_XEN) { 375 tsc_freq_xen(); 376 if (bootverbose) 377 printf( 378 "Early TSC frequency %juHz derived from Xen CPUID\n", 379 (uintmax_t)tsc_freq); 380 } else if (tsc_freq_cpuid(&tsc_freq)) { 381 /* 382 * If possible, use the value obtained from CPUID as the initial 383 * frequency. This will be refined later during boot but is 384 * good enough for now. The 8254 PIT is not functional on some 385 * newer platforms anyway, so don't delay our boot for what 386 * might be a garbage result. Late calibration is required if 387 * the initial frequency was obtained from CPUID.16H, as the 388 * derived value may be off by as much as 1%. 389 */ 390 if (bootverbose) 391 printf("Early TSC frequency %juHz derived from CPUID\n", 392 (uintmax_t)tsc_freq); 393 } 394 } 395 396 /* 397 * If we were unable to determine the TSC frequency via CPU registers, try 398 * to calibrate against a known clock. 399 */ 400 static void 401 probe_tsc_freq_late(void) 402 { 403 if (tsc_freq != 0) 404 return; 405 406 if (tsc_skip_calibration) { 407 /* 408 * Try to parse the brand string to obtain the nominal TSC 409 * frequency. 410 */ 411 if (cpu_vendor_id == CPU_VENDOR_INTEL && 412 tsc_freq_intel_brand(&tsc_freq)) { 413 if (bootverbose) 414 printf( 415 "Early TSC frequency %juHz derived from brand string\n", 416 (uintmax_t)tsc_freq); 417 } else { 418 tsc_disabled = 1; 419 } 420 } else { 421 /* 422 * Calibrate against a timecounter or the 8254 PIT. This 423 * estimate will be refined later in tsc_calib(). 424 */ 425 tsc_freq_tc(&tsc_freq); 426 if (bootverbose) 427 printf( 428 "Early TSC frequency %juHz calibrated from 8254 PIT\n", 429 (uintmax_t)tsc_freq); 430 } 431 } 432 433 void 434 start_TSC(void) 435 { 436 if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 437 return; 438 439 probe_tsc_freq_late(); 440 441 if (cpu_power_ecx & CPUID_PERF_STAT) { 442 /* 443 * XXX Some emulators expose host CPUID without actual support 444 * for these MSRs. We must test whether they really work. 445 */ 446 wrmsr(MSR_MPERF, 0); 447 wrmsr(MSR_APERF, 0); 448 DELAY(10); 449 if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) 450 tsc_perf_stat = 1; 451 } 452 453 /* 454 * Inform CPU accounting about our boot-time clock rate. This will 455 * be updated if someone loads a cpufreq driver after boot that 456 * discovers a new max frequency. 457 * 458 * The frequency may also be updated after late calibration is complete; 459 * however, we register the TSC as the ticker now to avoid switching 460 * counters after much of the kernel has already booted and potentially 461 * sampled the CPU clock. 462 */ 463 if (tsc_freq != 0) 464 set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant); 465 466 if (tsc_is_invariant) 467 return; 468 469 /* Register to find out about changes in CPU frequency. */ 470 tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change, 471 tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST); 472 tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change, 473 tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST); 474 tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed, 475 tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY); 476 } 477 478 #ifdef SMP 479 480 /* 481 * RDTSC is not a serializing instruction, and does not drain 482 * instruction stream, so we need to drain the stream before executing 483 * it. It could be fixed by use of RDTSCP, except the instruction is 484 * not available everywhere. 485 * 486 * Use CPUID for draining in the boot-time SMP constistency test. The 487 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel 488 * and VIA) when SSE2 is present, and nothing on older machines which 489 * also do not issue RDTSC prematurely. There, testing for SSE2 and 490 * vendor is too cumbersome, and we learn about TSC presence from CPUID. 491 * 492 * Do not use do_cpuid(), since we do not need CPUID results, which 493 * have to be written into memory with do_cpuid(). 494 */ 495 #define TSC_READ(x) \ 496 static void \ 497 tsc_read_##x(void *arg) \ 498 { \ 499 uint64_t *tsc = arg; \ 500 u_int cpu = PCPU_GET(cpuid); \ 501 \ 502 __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \ 503 tsc[cpu * 3 + x] = rdtsc(); \ 504 } 505 TSC_READ(0) 506 TSC_READ(1) 507 TSC_READ(2) 508 #undef TSC_READ 509 510 #define N 1000 511 512 static void 513 comp_smp_tsc(void *arg) 514 { 515 uint64_t *tsc; 516 int64_t d1, d2; 517 u_int cpu = PCPU_GET(cpuid); 518 u_int i, j, size; 519 520 size = (mp_maxid + 1) * 3; 521 for (i = 0, tsc = arg; i < N; i++, tsc += size) 522 CPU_FOREACH(j) { 523 if (j == cpu) 524 continue; 525 d1 = tsc[cpu * 3 + 1] - tsc[j * 3]; 526 d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1]; 527 if (d1 <= 0 || d2 <= 0) { 528 smp_tsc = 0; 529 return; 530 } 531 } 532 } 533 534 static void 535 adj_smp_tsc(void *arg) 536 { 537 uint64_t *tsc; 538 int64_t d, min, max; 539 u_int cpu = PCPU_GET(cpuid); 540 u_int first, i, size; 541 542 first = CPU_FIRST(); 543 if (cpu == first) 544 return; 545 min = INT64_MIN; 546 max = INT64_MAX; 547 size = (mp_maxid + 1) * 3; 548 for (i = 0, tsc = arg; i < N; i++, tsc += size) { 549 d = tsc[first * 3] - tsc[cpu * 3 + 1]; 550 if (d > min) 551 min = d; 552 d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2]; 553 if (d > min) 554 min = d; 555 d = tsc[first * 3 + 1] - tsc[cpu * 3]; 556 if (d < max) 557 max = d; 558 d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1]; 559 if (d < max) 560 max = d; 561 } 562 if (min > max) 563 return; 564 d = min / 2 + max / 2; 565 __asm __volatile ( 566 "movl $0x10, %%ecx\n\t" 567 "rdmsr\n\t" 568 "addl %%edi, %%eax\n\t" 569 "adcl %%esi, %%edx\n\t" 570 "wrmsr\n" 571 : /* No output */ 572 : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32)) 573 : "ax", "cx", "dx", "cc" 574 ); 575 } 576 577 static int 578 test_tsc(int adj_max_count) 579 { 580 uint64_t *data, *tsc; 581 u_int i, size, adj; 582 583 if ((!smp_tsc && !tsc_is_invariant)) 584 return (-100); 585 /* 586 * Misbehavior of TSC under VirtualBox has been observed. In 587 * particular, threads doing small (~1 second) sleeps may miss their 588 * wakeup and hang around in sleep state, causing hangs on shutdown. 589 */ 590 if (vm_guest == VM_GUEST_VBOX) 591 return (0); 592 593 TSENTER(); 594 size = (mp_maxid + 1) * 3; 595 data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK); 596 adj = 0; 597 retry: 598 for (i = 0, tsc = data; i < N; i++, tsc += size) 599 smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc); 600 smp_tsc = 1; /* XXX */ 601 smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc, 602 smp_no_rendezvous_barrier, data); 603 if (!smp_tsc && adj < adj_max_count) { 604 adj++; 605 smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc, 606 smp_no_rendezvous_barrier, data); 607 goto retry; 608 } 609 free(data, M_TEMP); 610 if (bootverbose) 611 printf("SMP: %sed TSC synchronization test%s\n", 612 smp_tsc ? "pass" : "fail", 613 adj > 0 ? " after adjustment" : ""); 614 TSEXIT(); 615 if (smp_tsc && tsc_is_invariant) { 616 switch (cpu_vendor_id) { 617 case CPU_VENDOR_AMD: 618 case CPU_VENDOR_HYGON: 619 /* 620 * Processor Programming Reference (PPR) for AMD 621 * Family 17h states that the TSC uses a common 622 * reference for all sockets, cores and threads. 623 */ 624 if (CPUID_TO_FAMILY(cpu_id) >= 0x17) 625 return (1000); 626 /* 627 * Starting with Family 15h processors, TSC clock 628 * source is in the north bridge. Check whether 629 * we have a single-socket/multi-core platform. 630 * XXX Need more work for complex cases. 631 */ 632 if (CPUID_TO_FAMILY(cpu_id) < 0x15 || 633 (amd_feature2 & AMDID2_CMP) == 0 || 634 smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1) 635 break; 636 return (1000); 637 case CPU_VENDOR_INTEL: 638 /* 639 * XXX Assume Intel platforms have synchronized TSCs. 640 */ 641 return (1000); 642 } 643 return (800); 644 } 645 return (-100); 646 } 647 648 #undef N 649 650 #endif /* SMP */ 651 652 static void 653 init_TSC_tc(void) 654 { 655 uint64_t max_freq; 656 int shift; 657 658 if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 659 return; 660 661 /* 662 * Limit timecounter frequency to fit in an int and prevent it from 663 * overflowing too fast. 664 */ 665 max_freq = UINT_MAX; 666 667 /* 668 * Intel CPUs without a C-state invariant TSC can stop the TSC 669 * in either C2 or C3. Disable use of C2 and C3 while using 670 * the TSC as the timecounter. The timecounter can be changed 671 * to enable C2 and C3. 672 * 673 * Note that the TSC is used as the cputicker for computing 674 * thread runtime regardless of the timecounter setting, so 675 * using an alternate timecounter and enabling C2 or C3 can 676 * result incorrect runtimes for kernel idle threads (but not 677 * for any non-idle threads). 678 */ 679 if (cpu_vendor_id == CPU_VENDOR_INTEL && 680 (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) { 681 tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP; 682 if (bootverbose) 683 printf("TSC timecounter disables C2 and C3.\n"); 684 } 685 686 /* 687 * We can not use the TSC in SMP mode unless the TSCs on all CPUs 688 * are synchronized. If the user is sure that the system has 689 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a 690 * non-zero value. The TSC seems unreliable in virtualized SMP 691 * environments, so it is set to a negative quality in those cases. 692 */ 693 #ifdef SMP 694 if (mp_ncpus > 1) 695 tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust); 696 else 697 #endif /* SMP */ 698 if (tsc_is_invariant) 699 tsc_timecounter.tc_quality = 1000; 700 max_freq >>= tsc_shift; 701 702 for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++) 703 ; 704 705 /* 706 * Timecounter implementation selection, top to bottom: 707 * - If RDTSCP is available, use RDTSCP. 708 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC 709 * on Intel, and MFENCE;RDTSC on AMD. 710 * - For really old CPUs, just use RDTSC. 711 */ 712 if ((amd_feature & AMDID_RDTSCP) != 0) { 713 tsc_timecounter.tc_get_timecount = shift > 0 ? 714 tscp_get_timecount_low : tscp_get_timecount; 715 } else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) { 716 if (cpu_vendor_id == CPU_VENDOR_AMD || 717 cpu_vendor_id == CPU_VENDOR_HYGON) { 718 tsc_timecounter.tc_get_timecount = shift > 0 ? 719 tsc_get_timecount_low_mfence : 720 tsc_get_timecount_mfence; 721 } else { 722 tsc_timecounter.tc_get_timecount = shift > 0 ? 723 tsc_get_timecount_low_lfence : 724 tsc_get_timecount_lfence; 725 } 726 } else { 727 tsc_timecounter.tc_get_timecount = shift > 0 ? 728 tsc_get_timecount_low : tsc_get_timecount; 729 } 730 if (shift > 0) { 731 tsc_timecounter.tc_name = "TSC-low"; 732 if (bootverbose) 733 printf("TSC timecounter discards lower %d bit(s)\n", 734 shift); 735 } 736 if (tsc_freq != 0) { 737 tsc_timecounter.tc_frequency = tsc_freq >> shift; 738 tsc_timecounter.tc_priv = (void *)(intptr_t)shift; 739 740 /* 741 * Timecounter registration is deferred until after late 742 * calibration is finished. 743 */ 744 } 745 } 746 SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL); 747 748 static void 749 tsc_update_freq(uint64_t new_freq) 750 { 751 atomic_store_rel_64(&tsc_freq, new_freq); 752 atomic_store_rel_64(&tsc_timecounter.tc_frequency, 753 new_freq >> (int)(intptr_t)tsc_timecounter.tc_priv); 754 } 755 756 void 757 tsc_init(void) 758 { 759 if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 760 return; 761 762 probe_tsc_freq_early(); 763 } 764 765 /* 766 * Perform late calibration of the TSC frequency once ACPI-based timecounters 767 * are available. At this point timehands are not set up, so we read the 768 * highest-quality timecounter directly rather than using (s)binuptime(). 769 */ 770 void 771 tsc_calibrate(void) 772 { 773 uint64_t freq; 774 775 if (tsc_disabled) 776 return; 777 if (tsc_early_calib_exact) 778 goto calibrated; 779 780 fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); 781 freq = clockcalib(rdtsc_ordered, "TSC"); 782 fpu_kern_leave(curthread, NULL); 783 tsc_update_freq(freq); 784 785 calibrated: 786 tc_init(&tsc_timecounter); 787 set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant); 788 } 789 790 void 791 resume_TSC(void) 792 { 793 #ifdef SMP 794 int quality; 795 796 /* If TSC was not good on boot, it is unlikely to become good now. */ 797 if (tsc_timecounter.tc_quality < 0) 798 return; 799 /* Nothing to do with UP. */ 800 if (mp_ncpus < 2) 801 return; 802 803 /* 804 * If TSC was good, a single synchronization should be enough, 805 * but honour smp_tsc_adjust if it's set. 806 */ 807 quality = test_tsc(MAX(smp_tsc_adjust, 1)); 808 if (quality != tsc_timecounter.tc_quality) { 809 printf("TSC timecounter quality changed: %d -> %d\n", 810 tsc_timecounter.tc_quality, quality); 811 tsc_timecounter.tc_quality = quality; 812 } 813 #endif /* SMP */ 814 } 815 816 /* 817 * When cpufreq levels change, find out about the (new) max frequency. We 818 * use this to update CPU accounting in case it got a lower estimate at boot. 819 */ 820 static void 821 tsc_levels_changed(void *arg, int unit) 822 { 823 device_t cf_dev; 824 struct cf_level *levels; 825 int count, error; 826 uint64_t max_freq; 827 828 /* Only use values from the first CPU, assuming all are equal. */ 829 if (unit != 0) 830 return; 831 832 /* Find the appropriate cpufreq device instance. */ 833 cf_dev = devclass_get_device(devclass_find("cpufreq"), unit); 834 if (cf_dev == NULL) { 835 printf("tsc_levels_changed() called but no cpufreq device?\n"); 836 return; 837 } 838 839 /* Get settings from the device and find the max frequency. */ 840 count = 64; 841 levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT); 842 if (levels == NULL) 843 return; 844 error = CPUFREQ_LEVELS(cf_dev, levels, &count); 845 if (error == 0 && count != 0) { 846 max_freq = (uint64_t)levels[0].total_set.freq * 1000000; 847 set_cputicker(rdtsc, max_freq, true); 848 } else 849 printf("tsc_levels_changed: no max freq found\n"); 850 free(levels, M_TEMP); 851 } 852 853 /* 854 * If the TSC timecounter is in use, veto the pending change. It may be 855 * possible in the future to handle a dynamically-changing timecounter rate. 856 */ 857 static void 858 tsc_freq_changing(void *arg, const struct cf_level *level, int *status) 859 { 860 861 if (*status != 0 || timecounter != &tsc_timecounter) 862 return; 863 864 printf("timecounter TSC must not be in use when " 865 "changing frequencies; change denied\n"); 866 *status = EBUSY; 867 } 868 869 /* Update TSC freq with the value indicated by the caller. */ 870 static void 871 tsc_freq_changed(void *arg, const struct cf_level *level, int status) 872 { 873 uint64_t freq; 874 875 /* If there was an error during the transition, don't do anything. */ 876 if (tsc_disabled || status != 0) 877 return; 878 879 /* Total setting for this level gives the new frequency in MHz. */ 880 freq = (uint64_t)level->total_set.freq * 1000000; 881 tsc_update_freq(freq); 882 } 883 884 static int 885 sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS) 886 { 887 int error; 888 uint64_t freq; 889 890 freq = atomic_load_acq_64(&tsc_freq); 891 if (freq == 0) 892 return (EOPNOTSUPP); 893 error = sysctl_handle_64(oidp, &freq, 0, req); 894 if (error == 0 && req->newptr != NULL) 895 tsc_update_freq(freq); 896 return (error); 897 } 898 SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, 899 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, 900 0, 0, sysctl_machdep_tsc_freq, "QU", 901 "Time Stamp Counter frequency"); 902 903 static u_int 904 tsc_get_timecount(struct timecounter *tc __unused) 905 { 906 907 return (rdtsc32()); 908 } 909 910 static u_int 911 tscp_get_timecount(struct timecounter *tc __unused) 912 { 913 914 return (rdtscp32()); 915 } 916 917 static inline u_int 918 tsc_get_timecount_low(struct timecounter *tc) 919 { 920 uint32_t rv; 921 922 __asm __volatile("rdtsc; shrd %%cl, %%edx, %0" 923 : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx"); 924 return (rv); 925 } 926 927 static u_int 928 tscp_get_timecount_low(struct timecounter *tc) 929 { 930 uint32_t rv; 931 932 __asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0" 933 : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx"); 934 return (rv); 935 } 936 937 static u_int 938 tsc_get_timecount_lfence(struct timecounter *tc __unused) 939 { 940 941 lfence(); 942 return (rdtsc32()); 943 } 944 945 static u_int 946 tsc_get_timecount_low_lfence(struct timecounter *tc) 947 { 948 949 lfence(); 950 return (tsc_get_timecount_low(tc)); 951 } 952 953 static u_int 954 tsc_get_timecount_mfence(struct timecounter *tc __unused) 955 { 956 957 mfence(); 958 return (rdtsc32()); 959 } 960 961 static u_int 962 tsc_get_timecount_low_mfence(struct timecounter *tc) 963 { 964 965 mfence(); 966 return (tsc_get_timecount_low(tc)); 967 } 968 969 static uint32_t 970 x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) 971 { 972 973 vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC; 974 vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv; 975 vdso_th->th_x86_hpet_idx = 0xffffffff; 976 vdso_th->th_x86_pvc_last_systime = 0; 977 vdso_th->th_x86_pvc_stable_mask = 0; 978 bzero(vdso_th->th_res, sizeof(vdso_th->th_res)); 979 return (1); 980 } 981 982 #ifdef COMPAT_FREEBSD32 983 static uint32_t 984 x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 985 struct timecounter *tc) 986 { 987 988 vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC; 989 vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv; 990 vdso_th32->th_x86_hpet_idx = 0xffffffff; 991 vdso_th32->th_x86_pvc_last_systime[0] = 0; 992 vdso_th32->th_x86_pvc_last_systime[1] = 0; 993 vdso_th32->th_x86_pvc_stable_mask = 0; 994 bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res)); 995 return (1); 996 } 997 #endif 998