/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN |
    CTLFLAG_NOFETCH, &tsc_skip_calibration, 0,
    "Disable TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

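/*
 * The timecounter template for the TSC.  The access method, quality,
 * and frequency are refined at runtime once the CPU type and, on SMP
 * machines, the cross-CPU behaviour of the TSC are known.
 */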
static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};

/*
 * VMware exports the TSC frequency either via CPUID leaf 0x40000010
 * (in kHz) or via the VMW_HVCMD_GETHZ hypercall; the TSC is treated
 * as invariant there.
 */
static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

/*
 * Calculate the TSC frequency using information from the CPUID leaf
 * 0x15 'Time Stamp Counter and Nominal Core Crystal Clock'.  When
 * available, it should be an improvement over parsing the CPU model
 * name in tsc_freq_intel().
 */
static bool
tsc_freq_cpuid(void)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] == 0 || regs[1] == 0 || regs[2] == 0)
		return (false);
	tsc_freq = (uint64_t)regs[2] * regs[1] / regs[0];
	return (true);
}

static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}

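/*
 * Determine the TSC frequency.  A value supplied by the hypervisor or
 * by CPUID is preferred; otherwise the frequency is measured by
 * counting TSC cycles across DELAY(1000000).
 */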
static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;
	uint16_t bootflags;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (!TUNABLE_INT_FETCH("machdep.disable_tsc_calibration",
	    &tsc_skip_calibration)) {
		/*
		 * The user has not explicitly requested or disabled
		 * calibration via the tunable, so the decision is ours
		 * to make.
		 *
		 * If the ACPI FADT reports that the platform is
		 * legacy-free and CPUID provides the TSC frequency,
		 * skip calibration: it could fail anyway, since the
		 * ISA timer may be absent or power gated.
		 */
		if (acpi_get_fadt_bootflags(&bootflags) &&
		    (bootflags & ACPI_FADT_LEGACY_DEVICES) == 0 &&
		    tsc_freq_cpuid()) {
			printf("Skipping TSC calibration since no legacy "
			    "devices reported by FADT and CPUID works\n");
			tsc_skip_calibration = 1;
		}
	}
	if (tsc_skip_calibration) {
		if (!tsc_freq_cpuid() && cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
	} else {
		if (bootverbose)
			printf("Calibrating TSC clock ... ");
		tsc1 = rdtsc();
		DELAY(1000000);
		tsc2 = rdtsc();
		tsc_freq = tsc2 - tsc1;
	}
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines, which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory by do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

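/*
 * Check a table of N sampling rounds collected by the tsc_read_*()
 * rendezvous: within each round, this CPU's second and third readings
 * must come after every other CPU's first and second readings,
 * respectively; any non-positive difference fails the test.
 */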
#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}

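/*
 * Run the TSC synchronization test, optionally letting adj_smp_tsc()
 * shift the non-BSP CPUs' TSC MSR (0x10) for up to adj_max_count
 * rounds.  Returns the timecounter quality to use for the TSC.
 */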
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, the TSC
			 * clock source is in the north bridge.  Check
			 * whether we have a single-socket/multi-core
			 * platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */

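/*
 * Decide whether the TSC can back a timecounter, select the read
 * method, and register the timecounter if a frequency is known.
 */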
static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do on UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

697 */ 698 static void 699 tsc_freq_changing(void *arg, const struct cf_level *level, int *status) 700 { 701 702 if (*status != 0 || timecounter != &tsc_timecounter) 703 return; 704 705 printf("timecounter TSC must not be in use when " 706 "changing frequencies; change denied\n"); 707 *status = EBUSY; 708 } 709 710 /* Update TSC freq with the value indicated by the caller. */ 711 static void 712 tsc_freq_changed(void *arg, const struct cf_level *level, int status) 713 { 714 uint64_t freq; 715 716 /* If there was an error during the transition, don't do anything. */ 717 if (tsc_disabled || status != 0) 718 return; 719 720 /* Total setting for this level gives the new frequency in MHz. */ 721 freq = (uint64_t)level->total_set.freq * 1000000; 722 atomic_store_rel_64(&tsc_freq, freq); 723 tsc_timecounter.tc_frequency = 724 freq >> (int)(intptr_t)tsc_timecounter.tc_priv; 725 } 726 727 static int 728 sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS) 729 { 730 int error; 731 uint64_t freq; 732 733 freq = atomic_load_acq_64(&tsc_freq); 734 if (freq == 0) 735 return (EOPNOTSUPP); 736 error = sysctl_handle_64(oidp, &freq, 0, req); 737 if (error == 0 && req->newptr != NULL) { 738 atomic_store_rel_64(&tsc_freq, freq); 739 atomic_store_rel_64(&tsc_timecounter.tc_frequency, 740 freq >> (int)(intptr_t)tsc_timecounter.tc_priv); 741 } 742 return (error); 743 } 744 745 SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW, 746 0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency"); 747 748 static u_int 749 tsc_get_timecount(struct timecounter *tc __unused) 750 { 751 752 return (rdtsc32()); 753 } 754 755 static inline u_int 756 tsc_get_timecount_low(struct timecounter *tc) 757 { 758 uint32_t rv; 759 760 __asm __volatile("rdtsc; shrd %%cl, %%edx, %0" 761 : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx"); 762 return (rv); 763 } 764 765 static u_int 766 tsc_get_timecount_lfence(struct timecounter *tc __unused) 767 { 768 769 lfence(); 770 return (rdtsc32()); 771 } 772 773 static u_int 774 tsc_get_timecount_low_lfence(struct timecounter *tc) 775 { 776 777 lfence(); 778 return (tsc_get_timecount_low(tc)); 779 } 780 781 static u_int 782 tsc_get_timecount_mfence(struct timecounter *tc __unused) 783 { 784 785 mfence(); 786 return (rdtsc32()); 787 } 788 789 static u_int 790 tsc_get_timecount_low_mfence(struct timecounter *tc) 791 { 792 793 mfence(); 794 return (tsc_get_timecount_low(tc)); 795 } 796 797 static uint32_t 798 x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) 799 { 800 801 vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC; 802 vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv; 803 vdso_th->th_x86_hpet_idx = 0xffffffff; 804 bzero(vdso_th->th_res, sizeof(vdso_th->th_res)); 805 return (1); 806 } 807 808 #ifdef COMPAT_FREEBSD32 809 static uint32_t 810 x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 811 struct timecounter *tc) 812 { 813 814 vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC; 815 vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv; 816 vdso_th32->th_x86_hpet_idx = 0xffffffff; 817 bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res)); 818 return (1); 819 } 820 #endif 821