/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable TSC frequency calibration");
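/*
 * All of the knobs above are CTLFLAG_RDTUN, i.e. boot-time tunables.  As an
 * illustrative (not exhaustive) example, they could be set from
 * loader.conf(5) like this:
 *
 *	machdep.disable_tsc="1"			# ignore the TSC entirely
 *	machdep.disable_tsc_calibration="1"	# trust CPUID-reported freq
 *	kern.timecounter.smp_tsc="1"		# assert TSCs are synchronized
 *	kern.timecounter.tsc_shift="2"		# pre-shift for fast clocks
 */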
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		/*
		 * Leaf 0x40000010 EAX is the TSC frequency in kHz; widen
		 * before scaling so frequencies above ~4.29 GHz do not
		 * overflow the u_int multiplication.
		 */
		tsc_freq = (uint64_t)regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15
 * 'Time Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15
 * is not functional, as it is on Skylake/Kabylake, try 0x16 'Processor
 * Frequency Information'.  Leaf 0x16 is described in the SDM as
 * informational only, but if 0x15 did not work and TSC calibration
 * is disabled, it is the best we can get.  It should still be an
 * improvement over the parsing of the CPU model name in
 * tsc_freq_intel(), when available.
 */
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}
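/*
 * A worked example of the leaf 0x15 computation above, with made-up
 * register values for illustration: EAX and EBX give the ratio of the
 * TSC to the core crystal clock, and ECX gives the crystal frequency
 * in Hz, so with EAX = 2, EBX = 112 and ECX = 24000000 (a 24 MHz
 * crystal) the TSC frequency would be 24000000 * 112 / 2 = 1.344 GHz.
 * Leaf 0x16, by contrast, reports the base frequency directly in MHz
 * in EAX, hence the plain multiplication by 1000000.
 */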
static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			/*
			 * Back up to the five characters before "Hz",
			 * e.g. "2.80G" or "1400M"; p[4] is the SI
			 * multiplier.
			 */
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}

static void
probe_tsc_freq(void)
{
	uint64_t tmp_freq, tsc1, tsc2;
	int no_cpuid_override;

	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual support
		 * for these MSRs.  We must test whether they really work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (tsc_freq_cpuid(&tmp_freq))
			tsc_freq = tmp_freq;
		else if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		if (tsc_freq == 0)
			tsc_disabled = 1;
	} else {
		if (bootverbose)
			printf("Calibrating TSC clock ... ");
		tsc1 = rdtsc();
		DELAY(1000000);
		tsc2 = rdtsc();
		tsc_freq = tsc2 - tsc1;
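		/*
		 * The delta over a one-second DELAY() is the frequency
		 * in Hz by construction.  As an observation (not a
		 * guarantee), the result is only as accurate as the
		 * timer backing DELAY(), which is why it is
		 * cross-checked against the CPUID-reported frequency
		 * below.
		 */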
		/*
		 * If the calibrated frequency and the frequency reported
		 * by the CPUID 0x15/0x16 leaves differ significantly,
		 * the calibration is probably bogus.  This happens on
		 * machines without an 8254 timer.  The BIOS rarely
		 * properly reports it in FADT boot flags, so just
		 * compare the frequencies directly.
		 */
		if (tsc_freq_cpuid(&tmp_freq) && qabs(tsc_freq - tmp_freq) >
		    uqmin(tsc_freq, tmp_freq)) {
			no_cpuid_override = 0;
			TUNABLE_INT_FETCH("machdep.disable_tsc_cpuid_override",
			    &no_cpuid_override);
			if (!no_cpuid_override) {
				if (bootverbose) {
					printf(
	"TSC clock: calibration freq %ju Hz, CPUID freq %ju Hz%s\n",
					    (uintmax_t)tsc_freq,
					    (uintmax_t)tmp_freq,
					    no_cpuid_override ? "" :
					    ", doing CPUID override");
				}
				tsc_freq = tmp_freq;
			}
		}
	}
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  This could be fixed by using RDTSCP, except that instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory with do_cpuid().
 */
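/*
 * Sketch of the shared buffer layout used by the test below: for each
 * of the N rendezvous rounds there are (mp_maxid + 1) * 3 slots, and
 * CPU "cpu" stores its x-th reading of a round at tsc[cpu * 3 + x].
 * Taking three readings per CPU per round is what lets comp_smp_tsc()
 * check that each CPU's middle reading falls between every other
 * CPU's first and last readings.
 */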
#define	TSC_READ(x)			\
static void				\
tsc_read_##x(void *arg)			\
{					\
	uint64_t *tsc = arg;		\
	u_int cpu = PCPU_GET(cpuid);	\
					\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \
	tsc[cpu * 3 + x] = rdtsc();	\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	/*
	 * Add the midpoint of the estimated [min, max] offset interval
	 * to this CPU's TSC by read-modify-writing MSR 0x10 (the TSC
	 * MSR).
	 */
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
	    : /* No output */
	    : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
	    : "ax", "cx", "dx", "cc"
	);
}

static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}
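/*
 * To summarize the return values above, which follow the timecounter
 * quality convention used elsewhere in this file: 1000 means the TSC
 * is believed synchronized by architectural guarantee, 800 means it
 * merely passed the empirical test, and -100 marks it as a last-resort
 * counter that is never automatically selected as the system clock.
 */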
#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We can not use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
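	/*
	 * Worked example of the shift computation above, with an assumed
	 * (not measured) 3 GHz part: with the default tsc_shift of 1,
	 * max_freq is UINT_MAX >> 1, roughly 2.15 GHz, so tsc_freq >>
	 * shift first fits at shift = 1 and the counter is advertised
	 * as a ~1.5 GHz "TSC-low" that discards the lowest TSC bit.
	 */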
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}
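/*
 * As a usage note (not enforced here): because of the EBUSY veto above,
 * an administrator who wants to change P-states on a non-invariant-TSC
 * machine would first have to switch timecounters, e.g. with
 * "sysctl kern.timecounter.hardware=HPET", assuming an HPET is present.
 */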
/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/*
	 * SHRD shifts the 64-bit EDX:EAX value right by the per-
	 * timecounter shift stashed in tc_priv, so the returned 32 bits
	 * are the next-higher TSC bits in place of the discarded low
	 * ones.
	 */
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif