/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_name = "TSC",
	.tc_quality = 800,	/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
#endif
};
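/*
 * Get the TSC frequency from the VMware hypervisor.  Newer versions
 * report it in kHz through CPUID leaf 0x40000010; older ones answer
 * the VMW_HVCMD_GETHZ backdoor call, whose 64-bit Hz result is used
 * only when the high word is not all-ones.
 */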
static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		/* Find the last "Hz" in the brand string. */
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			/*
			 * Back up so that p[0..3] hold the digits
			 * ("x.xx" or "xxxx") and p[4] the multiplier
			 * letter of a token such as "2.40GHz".
			 */
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}
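/*
 * Probe the TSC frequency and properties: ask the hypervisor when
 * running under VMware, mark the TSC invariant and select a fenced
 * read routine per vendor, and then either parse the frequency from
 * the Intel brand string (if calibration was disabled by the
 * machdep.disable_tsc_calibration tunable) or calibrate the counter
 * against DELAY().
 */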
static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);		/* one second, so the delta is Hz */
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory by do_cpuid().
 */
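/*
 * Record one TSC sample per CPU per slot.  smp_rendezvous() runs the
 * three readers back to back with barriers between the phases, so
 * slot 1 on any CPU is taken after slot 0 and before slot 2 on every
 * other CPU; comp_smp_tsc() below checks that the recorded counter
 * values honor that real-time ordering.
 */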
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			/*
			 * Our later reads must post-date the other
			 * CPU's earlier ones, or the TSCs disagree.
			 */
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}
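/*
 * Slew this AP's TSC to match the first CPU's.  The recorded
 * timestamps only bound the offset, so take the midpoint of the
 * tightest lower and upper bounds and add it to the counter through
 * the TSC MSR (0x10).
 */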
static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}

static int
test_tsc(void)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < smp_tsc_adjust) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, the TSC
			 * clock source is in the north bridge.  Check
			 * whether we have a single-socket/multi-core
			 * platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#else

/*
 * The function is never called; it is provided to avoid a linking
 * failure on uniprocessor kernels.
 */
static int
test_tsc(void)
{

	return (0);
}

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit the timecounter frequency to fit in an int and prevent
	 * it from overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but
	 * not for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to
	 * a non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc();
	else if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update the TSC frequency with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* The total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}
/*
 * Export the TSC frequency as the machdep.tsc_freq sysctl.  Writing a
 * new value, e.g. "sysctl machdep.tsc_freq=2400000000" (value purely
 * illustrative), updates tsc_freq and re-derives the timecounter
 * frequency from the configured shift.
 */
static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/* Read the full 64-bit TSC and shift out the discarded low bits. */
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif