/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable early TSC frequency calibration");
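/*
 * Usage note (added): the knobs above are boot-time tunables
 * (CTLFLAG_RDTUN), so they are set from the loader rather than with
 * sysctl(8) at runtime.  A hypothetical /boot/loader.conf that disables
 * the TSC entirely, or one that forces its use on a multi-socket box
 * believed to have synchronized TSCs, might contain:
 *
 *	machdep.disable_tsc="1"
 *
 *	kern.timecounter.smp_tsc="1"
 *	kern.timecounter.smp_tsc_adjust="1"
 */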
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		/* CPUID leaf 0x40000010 reports the TSC frequency in kHz. */
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15 'Time
 * Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15 is not
 * functional, as is the case on Skylake/Kaby Lake, try 0x16 'Processor
 * Frequency Information'.  Leaf 0x16 is described in the SDM as
 * informational only, but we can use this value until late calibration is
 * complete.
 */
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	/* EBX/EAX is the crystal-to-TSC ratio, ECX the crystal frequency. */
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	/* EAX is the processor base frequency in MHz. */
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}
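/*
 * Worked example (added; the numbers are hypothetical): a CPU with a
 * 24 MHz core crystal reporting CPUID.15H EAX=2, EBX=166, ECX=24000000
 * yields *res = 24000000 * 166 / 2 = 1992000000, i.e. a 1.992 GHz TSC.
 * On a part where leaf 0x15 returns ECX=0, a leaf 0x16 report of
 * EAX=3000 would instead give the nominal 3.0 GHz, accurate only to
 * roughly 1% until tsc_calibrate() refines it.
 */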
static bool
tsc_freq_intel_brand(uint64_t *res)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		/*
		 * Find the "Hz" suffix in the brand string; the frequency,
		 * e.g. "2.80GHz" or "3000MHz", immediately precedes it.
		 */
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			/* Back up to the first of the four digits. */
			p -= 5;
			/* p[4] is the SI prefix, giving the scale in MHz. */
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return (false);
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				/* "d.ddX" format, e.g. "2.80GHz". */
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				/* "ddddX" format, e.g. "3000MHz". */
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef	C2D
			*res = freq;
			return (true);
		}
	}
	return (false);
}

static void
tsc_freq_8254(uint64_t *res)
{
	uint64_t tsc1, tsc2;
	int64_t overhead;
	int count, i;

	/* Estimate the fixed cost of the DELAY() calls themselves. */
	overhead = 0;
	for (i = 0, count = 8; i < count; i++) {
		tsc1 = rdtsc_ordered();
		DELAY(0);
		tsc2 = rdtsc_ordered();
		if (i > 0)
			overhead += tsc2 - tsc1;
	}
	overhead /= count;

	/* Count TSC ticks over a 100ms delay, then scale to one second. */
	tsc1 = rdtsc_ordered();
	DELAY(100000);
	tsc2 = rdtsc_ordered();
	*res = (tsc2 - tsc1 - overhead) * 10;
}
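/*
 * Worked example (added; hypothetical numbers): if the 100000us DELAY
 * spans 299,700,000 TSC ticks and the measured DELAY(0) overhead is
 * 300,000 ticks, the estimate is (299700000 - 300000) * 10 =
 * 2,994,000,000 Hz, close to a nominal 3.0 GHz part.  Similarly,
 * tsc_freq_intel_brand() parses "2.80GHz" as (2*1000 + 8*100 + 0*10) *
 * 1000 * 1000 = 2,800,000,000 Hz.
 */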
static void
probe_tsc_freq(void)
{
	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual support
		 * for these MSRs.  We must test whether they really work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_freq_cpuid(&tsc_freq)) {
		/*
		 * If possible, use the value obtained from CPUID as the
		 * initial frequency.  This will be refined later during
		 * boot but is good enough for now.  The 8254 PIT is not
		 * functional on some newer platforms anyway, so don't delay
		 * our boot for what might be a garbage result.  Late
		 * calibration is required if the initial frequency was
		 * obtained from CPUID.16H, as the derived value may be off
		 * by as much as 1%.
		 */
		if (bootverbose)
			printf("Early TSC frequency %juHz derived from CPUID\n",
			    (uintmax_t)tsc_freq);
	} else if (tsc_skip_calibration) {
		/*
		 * Try to parse the brand string to obtain the nominal TSC
		 * frequency.
		 */
		if (cpu_vendor_id == CPU_VENDOR_INTEL &&
		    tsc_freq_intel_brand(&tsc_freq)) {
			if (bootverbose)
				printf(
			"Early TSC frequency %juHz derived from brand string\n",
				    (uintmax_t)tsc_freq);
		} else {
			tsc_disabled = 1;
		}
	} else {
		/*
		 * Calibrate against the 8254 PIT.  This estimate will be
		 * refined later in tsc_calib().
		 */
		tsc_freq_8254(&tsc_freq);
		if (bootverbose)
			printf(
			    "Early TSC frequency %juHz calibrated from 8254 PIT\n",
			    (uintmax_t)tsc_freq);
	}
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 *
	 * The frequency may also be updated after late calibration is
	 * complete; however, we register the TSC as the ticker now to avoid
	 * switching counters after much of the kernel has already booted
	 * and potentially sampled the CPU clock.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  This could be fixed by using RDTSCP, except that instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines, which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory by do_cpuid().
 */
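/*
 * Layout note (added): the scratch buffer passed to the rendezvous
 * handlers below is indexed as tsc[cpu * 3 + x], i.e. three TSC samples
 * per CPU per round.  Each of the N rounds takes the three samples on
 * every CPU in lockstep (tsc_read_0, then tsc_read_1, then tsc_read_2),
 * so sample x on one CPU is bracketed in real time by samples x-1 and
 * x+1 on all others; comp_smp_tsc() exploits this to detect TSCs that
 * run behind.
 */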
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			/*
			 * Our samples 1 and 2 must read as later than the
			 * other CPU's samples 0 and 1, respectively;
			 * otherwise the TSCs are not synchronized.
			 */
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;

	/*
	 * Bound the adjustment needed for this CPU's TSC to match the
	 * first CPU's: "min" accumulates the greatest lower bound and
	 * "max" the least upper bound seen across all rounds.
	 */
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	/* Add the midpoint of the bounds to our TSC (MSR 0x10). */
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}
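/*
 * Worked example (added; hypothetical numbers): if the observed bounds
 * for an AP settle at min = 1190 and max = 1210 ticks behind the BSP,
 * the midpoint d = 595 + 605 = 1200 is added to the AP's
 * IA32_TIME_STAMP_COUNTER (MSR 0x10) via the RDMSR/WRMSR sequence
 * above, after which test_tsc() re-runs the consistency check.
 */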
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	/*
	 * Misbehavior of TSC under VirtualBox has been observed.  In
	 * particular, threads doing small (~1 second) sleeps may miss their
	 * wakeup and hang around in sleep state, causing hangs on shutdown.
	 */
	if (vm_guest == VM_GUEST_VBOX)
		return (0);

	TSENTER();
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	TSEXIT();
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but
	 * not for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;

	/*
	 * Timecounter implementation selection, top to bottom:
	 * - If RDTSCP is available, use RDTSCP.
	 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC
	 *   on Intel, and MFENCE;RDTSC on AMD.
	 * - For really old CPUs, just use RDTSC.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0) {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tscp_get_timecount_low : tscp_get_timecount;
	} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;

		/*
		 * Timecounter registration is deferred until after late
		 * calibration is finished.
		 */
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
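/*
 * Worked example (added; hypothetical numbers): with the default
 * tsc_shift of 1, max_freq is UINT_MAX >> 1, roughly 2^31 Hz.  A
 * 4.0 GHz TSC exceeds that, so the loop above settles on shift = 1 and
 * the counter is registered as "TSC-low", ticking at 2.0 GHz with the
 * least significant bit discarded.  The shift is stashed in tc_priv so
 * that the *_low readers and the vdso handoff apply the same scaling.
 */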
static void
tsc_update_freq(uint64_t new_freq)
{
	atomic_store_rel_64(&tsc_freq, new_freq);
	atomic_store_rel_64(&tsc_timecounter.tc_frequency,
	    new_freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
}

/*
 * Perform late calibration of the TSC frequency once ACPI-based
 * timecounters are available.  At this point timehands are not set up,
 * so we read the highest-quality timecounter directly rather than using
 * (s)binuptime().
 */
void
tsc_calibrate(void)
{
	struct timecounter *tc;
	uint64_t freq, t_end, t_start, tsc_end, tsc_start;
	register_t flags;
	int cpu;

	if (tsc_disabled)
		return;

	tc = atomic_load_ptr(&timecounter);

	flags = intr_disable();
	cpu = curcpu;
	tsc_start = rdtsc_ordered();
	t_start = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	intr_restore(flags);

	DELAY(1000000);

	/* Bind to the starting CPU so that the two TSC reads compare. */
	thread_lock(curthread);
	sched_bind(curthread, cpu);

	flags = intr_disable();
	tsc_end = rdtsc_ordered();
	t_end = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	intr_restore(flags);

	sched_unbind(curthread);
	thread_unlock(curthread);

	if (t_end <= t_start) {
		/* Assume that the counter has wrapped around at most once. */
		t_end += (uint64_t)tc->tc_counter_mask + 1;
	}

	freq = tc->tc_frequency * (tsc_end - tsc_start) / (t_end - t_start);

	tsc_update_freq(freq);
	tc_init(&tsc_timecounter);
	set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);
}
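/*
 * Worked example (added; hypothetical numbers): against an HPET with
 * tc_frequency = 14318180 Hz, a one-second DELAY might span
 * t_end - t_start = 14318180 HPET ticks and tsc_end - tsc_start =
 * 2994000000 TSC ticks, giving freq = 14318180 * 2994000000 / 14318180
 * = 2,994,000,000 Hz.  The reference timecounter, not DELAY()'s own
 * coarse early-boot notion of a second, is what sets the accuracy.
 */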
void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do on UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	tsc_update_freq(freq);
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL)
		tsc_update_freq(freq);
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{

	return (rdtscp32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
	    : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}
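/*
 * Worked example (added; hypothetical numbers): in the *_low variants
 * above, SHRD shifts the 64-bit EDX:EAX result right by tc_priv bits
 * and keeps the low 32 bits.  With shift = 1 and a raw TSC value of
 * 0x123456789, the returned timecount is 0x91A2B3C4, consistent with
 * tc_frequency = tsc_freq >> 1.
 */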
static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif