/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;
static int	tsc_early_calib_exact;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable early TSC frequency calibration");
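/*
 * Note: the CTLFLAG_RDTUN knobs above are boot-time loader tunables; for
 * example (illustrative only), a /boot/loader.conf entry of
 * "machdep.disable_tsc=1" disables use of the TSC before the kernel ever
 * selects it as a timecounter or cputicker.
 */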
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
	tsc_early_calib_exact = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15 'Time
 * Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15 is not
 * functional, as is the case on Skylake/Kabylake, try 0x16 'Processor
 * Frequency Information'.  Leaf 0x16 is described in the SDM as
 * informational only, but we can use this value until late calibration is
 * complete.
 */
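/*
 * Worked example of the conversions below, with hypothetical register
 * values: a leaf 0x15 report of EAX=2, EBX=188 and ECX=24000000 (a 24 MHz
 * crystal) gives 24000000 * 188 / 2 = 2256000000 Hz.  Leaf 0x16 instead
 * reports the base frequency directly in MHz, so EAX=3600 maps to
 * 3600000000 Hz.
 */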
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}
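/*
 * Example of the brand string parsing below, using the hypothetical string
 * "Intel(R) Core(TM) i7 CPU @ 3.60GHz": p is left pointing at "3.60GHz",
 * p[4] == 'G' selects a multiplier of 1000, and the "d.dd" form yields
 * (3 * 1000 + 6 * 100 + 0 * 10) * 1000 * 1000 = 3600000000 Hz.
 */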
static bool
tsc_freq_intel_brand(uint64_t *res)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return (false);
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef	C2D
			*res = freq;
			return (true);
		}
	}
	return (false);
}

static void
tsc_freq_8254(uint64_t *res)
{
	uint64_t tsc1, tsc2;
	int64_t overhead;
	int count, i;

	overhead = 0;
	for (i = 0, count = 8; i < count; i++) {
		tsc1 = rdtsc_ordered();
		DELAY(0);
		tsc2 = rdtsc_ordered();
		if (i > 0)
			overhead += tsc2 - tsc1;
	}
	overhead /= count;

	tsc1 = rdtsc_ordered();
	DELAY(100000);
	tsc2 = rdtsc_ordered();
	*res = (tsc2 - tsc1 - overhead) * 10;
}

static void
probe_tsc_freq(void)
{
	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual
		 * support for these MSRs.  We must test whether they really
		 * work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_freq_cpuid(&tsc_freq)) {
		/*
		 * If possible, use the value obtained from CPUID as the
		 * initial frequency.  This will be refined later during
		 * boot but is good enough for now.  The 8254 PIT is not
		 * functional on some newer platforms anyway, so don't delay
		 * our boot for what might be a garbage result.  Late
		 * calibration is required if the initial frequency was
		 * obtained from CPUID.16H, as the derived value may be off
		 * by as much as 1%.
		 */
		if (bootverbose)
			printf(
			    "Early TSC frequency %juHz derived from CPUID\n",
			    (uintmax_t)tsc_freq);
	} else if (tsc_skip_calibration) {
		/*
		 * Try to parse the brand string to obtain the nominal TSC
		 * frequency.
		 */
		if (cpu_vendor_id == CPU_VENDOR_INTEL &&
		    tsc_freq_intel_brand(&tsc_freq)) {
			if (bootverbose)
				printf(
		    "Early TSC frequency %juHz derived from brand string\n",
				    (uintmax_t)tsc_freq);
		} else {
			tsc_disabled = 1;
		}
	} else {
		/*
		 * Calibrate against the 8254 PIT.  This estimate will be
		 * refined later in tsc_calibrate().
		 */
		tsc_freq_8254(&tsc_freq);
		if (bootverbose)
			printf(
		    "Early TSC frequency %juHz calibrated from 8254 PIT\n",
			    (uintmax_t)tsc_freq);
	}
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 *
	 * The frequency may also be updated after late calibration is
	 * complete; however, we register the TSC as the ticker now to avoid
	 * switching counters after much of the kernel has already booted
	 * and potentially sampled the CPU clock.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
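/*
 * Layout note for the generated readers below: each rendezvous round
 * stores three readings per CPU at tsc[cpu * 3 + 0..2], which the
 * comparison and adjustment passes then index pairwise.
 */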
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;

	/*
	 * Derive the tightest lower (min) and upper (max) bounds on this
	 * AP's offset from the BSP that are consistent with the recorded
	 * readings.
	 */
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;

	/* Add the midpoint of the bounds to MSR_TSC (IA32 MSR 0x10). */
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}
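/*
 * The test below yields the timecounter quality to advertise for the TSC:
 * 1000 when a shared reference is guaranteed or assumed (AMD family 17h
 * and later, single-socket family 15h/16h, Intel), 800 when the
 * synchronization test passes without such a guarantee, 0 under
 * VirtualBox, and -100 when the test fails or does not apply.
 */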
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	/*
	 * Misbehavior of TSC under VirtualBox has been observed.  In
	 * particular, threads doing small (~1 second) sleeps may miss their
	 * wakeup and hang around in sleep state, causing hangs on shutdown.
	 */
	if (vm_guest == VM_GUEST_VBOX)
		return (0);

	TSENTER();
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	TSEXIT();
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */
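/*
 * A worked example of the shift selection in init_TSC_tc() below, assuming
 * the default tsc_shift of 1 and a hypothetical 4.2 GHz TSC: max_freq is
 * UINT_MAX >> 1 (about 2.15 GHz), so the loop picks shift = 1 and the
 * timecounter is registered as "TSC-low" running at 2.1 GHz.
 */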
static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;

	/*
	 * Timecounter implementation selection, top to bottom:
	 * - If RDTSCP is available, use RDTSCP.
	 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC
	 *   on Intel, and MFENCE;RDTSC on AMD.
	 * - For really old CPUs, just use RDTSC.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0) {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tscp_get_timecount_low : tscp_get_timecount;
	} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;

		/*
		 * Timecounter registration is deferred until after late
		 * calibration is finished.
		 */
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

static void
tsc_update_freq(uint64_t new_freq)
{
	atomic_store_rel_64(&tsc_freq, new_freq);
	atomic_store_rel_64(&tsc_timecounter.tc_frequency,
	    new_freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
}

/*
 * Perform late calibration of the TSC frequency once ACPI-based
 * timecounters are available.  At this point timehands are not set up, so
 * we read the highest-quality timecounter directly rather than using
 * (s)binuptime().
 */
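/*
 * Example of the computation below, with hypothetical numbers: calibrating
 * against a 14318182 Hz HPET, a delta of 14318182 timecounter ticks (one
 * second) bracketing 2994000000 TSC ticks yields
 * 14318182 * 2994000000 / 14318182 = 2994000000 Hz.
 */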
void
tsc_calibrate(void)
{
	struct timecounter *tc;
	uint64_t freq, tsc_start, tsc_end;
	uint64_t t_start, t_end;
	register_t flags;
	int cpu;

	if (tsc_disabled)
		return;
	if (tsc_early_calib_exact)
		goto calibrated;

	/*
	 * Avoid using a low-quality timecounter to re-calibrate.  In
	 * particular, old 32-bit platforms might only have the 8254 timer
	 * to calibrate against.
	 */
	tc = atomic_load_ptr(&timecounter);
	if (tc->tc_quality <= 0)
		goto calibrated;

	flags = intr_disable();
	cpu = curcpu;
	tsc_start = rdtsc_ordered();
	t_start = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	intr_restore(flags);

	DELAY(1000000);

	thread_lock(curthread);
	sched_bind(curthread, cpu);

	flags = intr_disable();
	tsc_end = rdtsc_ordered();
	t_end = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	intr_restore(flags);

	sched_unbind(curthread);
	thread_unlock(curthread);

	if (t_end <= t_start) {
		/* Assume that the counter has wrapped around at most once. */
		t_end += (uint64_t)tc->tc_counter_mask + 1;
	}

	freq = tc->tc_frequency * (tsc_end - tsc_start) / (t_end - t_start);

	tsc_update_freq(freq);
calibrated:
	tc_init(&tsc_timecounter);
	set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);
}

void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at
 * boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	tsc_update_freq(freq);
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL)
		tsc_update_freq(freq);
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{

	return (rdtscp32());
}
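/*
 * The "shrd" in the low variants below returns bits [shift, shift + 31]
 * of the 64-bit TSC value; e.g. with tc_priv == 1, a raw reading of
 * 0x100000001 is reported as 0x80000000.
 */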
static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
	    : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif