/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable TSC frequency calibration");

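/*
 * The reader variants declared below differ along two axes: the barrier
 * used to keep RDTSC from executing ahead of older instructions (LFENCE
 * on Intel and VIA, MFENCE on AMD, the implicit ordering of RDTSCP where
 * available, or nothing on pre-SSE2 CPUs), and whether the "_low" form
 * pre-shifts the counter so that a fast TSC still fits the 32-bit
 * timecounter interface.  See init_TSC_tc() for the selection logic.
 */
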
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		/*
		 * Leaf 0x40000010 reports the TSC frequency in kHz; the
		 * cast avoids 32-bit overflow for TSCs above ~4.29 GHz.
		 */
		tsc_freq = (uint64_t)regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15
 * 'Time Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15
 * is not functional, as it is on Skylake/Kabylake, try 0x16 'Processor
 * Frequency Information'.  Leaf 0x16 is described in the SDM as
 * informational only, but if 0x15 did not work and TSC calibration is
 * disabled, it is the best estimate we can get.  It should still be an
 * improvement over parsing the CPU brand string in tsc_freq_intel().
 */
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}

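/*
 * Worked example for the leaf 0x15 math above, with illustrative
 * register values rather than numbers from any specific part: if
 * CPUID.15H returns EAX (denominator) = 2, EBX (numerator) = 168 and
 * ECX (crystal clock, Hz) = 38400000, then
 *
 *	tsc_freq = 38400000 * 168 / 2 = 3225600000 Hz (~3.23 GHz).
 *
 * For the leaf 0x16 fallback, EAX reports the base frequency in MHz,
 * so EAX = 3200 yields 3200 * 1000000 = 3.2 GHz.
 */
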
static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}

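/*
 * Example of the brand-string parse above: for a string ending in
 * "... @ 3.50GHz", the scan leaves p pointing at the 'H', and p -= 5
 * rewinds it to "3.50G".  p[4] == 'G' selects i = 1000, p[1] == '.'
 * selects the fractional format, and
 *
 *	freq = (3 * 1000 + 5 * 100 + 0 * 10) * 1000 * 1000
 *	     = 3500000000 Hz.
 */
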
static void
probe_tsc_freq(void)
{
	uint64_t tmp_freq, tsc1, tsc2;
	int no_cpuid_override;

	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual
		 * support for these MSRs.  We must test whether they really
		 * work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (tsc_freq_cpuid(&tmp_freq))
			tsc_freq = tmp_freq;
		else if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		if (tsc_freq == 0)
			tsc_disabled = 1;
	} else {
		if (bootverbose)
			printf("Calibrating TSC clock ... ");
		tsc1 = rdtsc();
		DELAY(1000000);
		tsc2 = rdtsc();
		tsc_freq = tsc2 - tsc1;

		/*
		 * If the calibrated frequency and the frequency reported
		 * by the CPUID 0x15/0x16 leaves differ significantly, the
		 * calibration is probably bogus.  This happens on machines
		 * without a working 8254 timer, and the BIOS rarely reports
		 * that properly in the FADT boot flags, so just compare the
		 * frequencies directly.
		 */
		if (tsc_freq_cpuid(&tmp_freq) && qabs(tsc_freq - tmp_freq) >
		    uqmin(tsc_freq, tmp_freq)) {
			no_cpuid_override = 0;
			TUNABLE_INT_FETCH("machdep.disable_tsc_cpuid_override",
			    &no_cpuid_override);
			if (!no_cpuid_override) {
				if (bootverbose) {
					printf(
	    "TSC clock: calibration freq %ju Hz, CPUID freq %ju Hz, doing CPUID override\n",
					    (uintmax_t)tsc_freq,
					    (uintmax_t)tmp_freq);
				}
				tsc_freq = tmp_freq;
			}
		}
	}
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

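/*
 * A numeric illustration of the sanity check above, using made-up
 * frequencies: a calibration run against a non-functional 8254 might
 * measure, say, 999 MHz on a CPU whose CPUID leaves report 3000 MHz.
 * Then |999 - 3000| = 2001 > min(999, 3000) = 999, so the CPUID value
 * wins (unless machdep.disable_tsc_cpuid_override is set).  The check
 * only fires when the two values differ by more than a factor of two,
 * so ordinary calibration jitter never triggers it.
 */
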
void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so the stream must be drained before executing
 * it.  This could be done with RDTSCP, except that instruction is not
 * available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines, which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID anyway.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * do_cpuid() would have to write into memory.
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}

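/*
 * How the adjustment above works: in every rendezvous round, the first
 * CPU's reads must interleave with this CPU's bracketing reads once the
 * offset d is applied, which yields a lower bound ("min", seeded with
 * INT64_MIN) and an upper bound ("max", seeded with INT64_MAX) on the
 * acceptable offsets.  If the window is non-empty, the midpoint is
 * added to MSR 0x10 (the TSC itself) by the RDMSR/ADDL/ADCL/WRMSR
 * sequence, which performs the 64-bit addition in the EDX:EAX pair.
 */
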
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	/*
	 * Misbehavior of the TSC under VirtualBox has been observed.  In
	 * particular, threads doing small (~1 second) sleeps may miss their
	 * wakeup and hang around in sleep state, causing hangs on shutdown.
	 */
	if (vm_guest == VM_GUEST_VBOX)
		return (0);

	TSENTER();
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	TSEXIT();
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, the TSC
			 * clock source is in the north bridge.  Check
			 * whether we have a single-socket/multi-core
			 * platform.  XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

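/*
 * The return values of test_tsc() follow timecounter quality
 * conventions: 1000 marks the TSC as known-good and preferred, 800 is
 * the default "passed the test, topology gives no stronger guarantee"
 * quality, and a negative value such as -100 means the timecounter is
 * only used when the administrator selects it explicitly.
 */
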
#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit the timecounter frequency to fit in an int and prevent
	 * it from overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but
	 * not for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to
	 * a non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;

	/*
	 * Timecounter implementation selection, top to bottom:
	 * - If RDTSCP is available, use RDTSCP.
	 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC
	 *   on Intel, and MFENCE;RDTSC on AMD.
	 * - For really old CPUs, just use RDTSC.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0) {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tscp_get_timecount_low : tscp_get_timecount;
	} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

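/*
 * Shift selection example with the default tsc_shift of 1: max_freq
 * becomes UINT_MAX >> 1, roughly 2.1 GHz.  A 3 GHz invariant TSC then
 * gets shift = 1, is advertised as "TSC-low" running at 1.5 GHz, and
 * tc_priv records the shift for the readers and the vDSO.  One
 * low-order bit of resolution is traded for doubling the time until
 * the 32-bit counter wraps.
 */
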
void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If the TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If the TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update the TSC frequency with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{

	return (rdtscp32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/*
	 * Shift the 64-bit counter in EDX:EAX right by tc_priv bits and
	 * return the low 32 bits of the result.
	 */
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/* RDTSCP clobbers %ecx, so the shift count is loaded afterwards. */
	__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
	    : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif