/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable TSC frequency calibration");
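/*
 * For reference, all of the knobs above are boot-time tunables
 * (CTLFLAG_RDTUN), so they can be preset from loader.conf(5).  A
 * minimal sketch, with example values only:
 *
 *	machdep.disable_tsc="1"			# ignore the TSC entirely
 *	machdep.disable_tsc_calibration="1"	# skip the DELAY() loop
 *	kern.timecounter.smp_tsc="1"		# assert TSCs are SMP-safe
 *	kern.timecounter.smp_tsc_adjust="1"	# try to sync APs to the BSP
 */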
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		/* Hypervisor leaf 0x40000010 reports the TSC rate in kHz. */
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15
 * 'Time Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15
 * is not functional, as it is on Skylake/Kabylake, try 0x16 'Processor
 * Frequency Information'.  Leaf 0x16 is described in the SDM as
 * informational only, but if 0x15 did not work, and TSC calibration
 * is disabled, it is the best we can get.  It should still be an
 * improvement over the parsing of the CPU model name in
 * tsc_freq_intel(), when available.
 */
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}
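/*
 * A worked example of the leaf 0x15 math above, with hypothetical
 * register values: EAX=2, EBX=192 and ECX=25000000 describe a 25 MHz
 * crystal and a TSC/crystal ratio of 192/2, so the TSC frequency is
 * 25000000 * 192 / 2 = 2400000000 Hz.  When ECX is 0 (the crystal
 * frequency is not enumerated), the leaf is treated as non-functional
 * and leaf 0x16, which reports the base frequency directly in MHz,
 * is tried instead.
 */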
static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}
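/*
 * Example of the brand-string parse above (illustrative input): given
 * "Intel(R) Core(TM) i7 CPU 860 @ 2.80GHz", the scan leaves p pointing
 * at "2.80G", the p[1] == '.' branch is taken, and the result is
 * freq = (2*1000 + 8*100 + 0*10) * 1000 * 1000 = 2800000000 Hz.
 */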
static void
probe_tsc_freq(void)
{
	uint64_t tmp_freq, tsc1, tsc2;
	int no_cpuid_override;

	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual
		 * support for these MSRs.  We must test whether they really
		 * work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (tsc_freq_cpuid(&tmp_freq))
			tsc_freq = tmp_freq;
		else if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		if (tsc_freq == 0)
			tsc_disabled = 1;
	} else {
		if (bootverbose)
			printf("Calibrating TSC clock ... ");
		tsc1 = rdtsc();
		DELAY(1000000);
		tsc2 = rdtsc();
		tsc_freq = tsc2 - tsc1;

		/*
		 * If the calibrated frequency and the frequency reported
		 * by the CPUID 0x15/0x16 leaves differ significantly, the
		 * calibration is probably bogus.  This happens on machines
		 * without an 8254 timer.  The BIOS rarely reports it
		 * properly in the FADT boot flags, so just compare the
		 * frequencies directly.
		 */
		if (tsc_freq_cpuid(&tmp_freq) && qabs(tsc_freq - tmp_freq) >
		    uqmin(tsc_freq, tmp_freq)) {
			no_cpuid_override = 0;
			TUNABLE_INT_FETCH("machdep.disable_tsc_cpuid_override",
			    &no_cpuid_override);
			if (!no_cpuid_override) {
				if (bootverbose) {
					printf(
	"TSC clock: calibration freq %ju Hz, CPUID freq %ju Hz%s\n",
					    (uintmax_t)tsc_freq,
					    (uintmax_t)tmp_freq,
					    no_cpuid_override ? "" :
					    ", doing CPUID override");
				}
				tsc_freq = tmp_freq;
			}
		}
	}
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}
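/*
 * Illustration of the sanity check above, with made-up numbers: if
 * DELAY(1000000) returns early on a machine without an 8254, the
 * calibration might yield, say, 800 MHz on a CPU whose CPUID leaves
 * report 3000 MHz.  Then qabs(800M - 3000M) = 2200M exceeds
 * uqmin(800M, 3000M) = 800M, so the CPUID value replaces the bogus
 * measurement unless machdep.disable_tsc_cpuid_override is set.
 */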
void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}
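/*
 * Rough sketch of the test below: take N rendezvous rounds of three
 * back-to-back TSC readings per CPU, stored at tsc[cpu * 3 + 0..2].
 * comp_smp_tsc() demands that each CPU's second and third readings be
 * strictly greater than every other CPU's first and second readings,
 * respectively.  If that fails, up to adj_max_count attempts are made
 * to slew each AP's counter toward the BSP's by writing the TSC MSR
 * (0x10) directly in adj_smp_tsc() before retrying.
 */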
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */
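/*
 * Worked example for the "TSC-low" shift chosen below (hypothetical
 * frequency): with the default tsc_shift of 1, max_freq becomes
 * UINT_MAX >> 1, about 2.1 GHz.  A 3.0 GHz TSC therefore gets
 * shift = 1 and is advertised as a 1.5 GHz "TSC-low" timecounter,
 * which keeps the reported frequency within an int and slows the
 * wrap of the 32-bit counter values handed to the timecounter code.
 */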
static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We can not use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but
	 * not for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;

	/*
	 * Timecounter implementation selection, top to bottom:
	 * - If RDTSCP is available, use RDTSCP.
	 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC
	 *   on Intel, and MFENCE;RDTSC on AMD.
	 * - For really old CPUs, just use RDTSC.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0) {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tscp_get_timecount_low : tscp_get_timecount;
	} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}
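/*
 * Example of the conversion done below (hypothetical level): a
 * cf_level whose total_set.freq is 2400 yields freq = 2400 * 1000000
 * = 2400000000 Hz; the timecounter frequency is the same value
 * pre-shifted right by the "TSC-low" shift stashed in tc_priv.
 */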
/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{

	return (rdtscp32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/*
	 * RDTSC leaves the 64-bit counter in EDX:EAX; SHRD then shifts
	 * the combined value right by the tc_priv shift count, so the
	 * 32 bits returned in EAX are bits [shift..shift+31] of the TSC.
	 */
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/*
	 * As above, but RDTSCP also returns IA32_TSC_AUX in ECX, so the
	 * shift count is loaded into ECX only after the read.
	 */
	__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
	    : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif