/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
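
/*
 * Several flavors of the hardware read routine are declared above: plain
 * RDTSC, RDTSC preceded by LFENCE or MFENCE so that the read cannot start
 * early (see the comment above the SMP test below), and "low" variants of
 * each that shift out low-order bits when the counter runs too fast for
 * the 32-bit timecounter interface.  The default installed here may be
 * replaced in probe_tsc_freq() and again in init_TSC_tc().
 */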
static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC",			/* name */
	800,			/* quality (adjusted in code) */
};

static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
}

static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef	C2D
			tsc_freq = freq;
		}
	}
}
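
/*
 * A worked example of the parsing above: for a brand string ending in
 * "2.40GHz", p is left pointing at the '2', so p[1] == '.' and
 * p[4] == 'G', giving
 *	freq = (2 * 1000 + 4 * 100 + 0 * 10) * 1000 * 1000 = 2400000000 Hz.
 */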
static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		return;
	}

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0)
				return;
			break;
		}
		break;
	}
#endif

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}
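
/*
 * init_TSC() above runs early in machine-dependent startup, before the
 * application processors are launched; the timecounter itself is only
 * registered by init_TSC_tc() below, which runs at SI_SUB_SMP so that
 * the SMP synchronization test can rendezvous with all CPUs online.
 */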
#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}
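
/*
 * A note on the arithmetic in adj_smp_tsc() above: each sample round
 * interleaves reads on the first CPU with reads on this CPU, so every
 * difference computed bounds this CPU's skew relative to the first CPU
 * from below (raising min) or from above (lowering max).  If the window
 * is non-empty, its midpoint is added to this CPU's TSC by the inline
 * assembly, which read-modify-writes MSR 0x10 (IA32_TIME_STAMP_COUNTER).
 */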
static int
test_tsc(void)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	if (!smp_tsc && adj < smp_tsc_adjust) {
		adj++;
		smp_rendezvous(smp_no_rendevous_barrier, adj_smp_tsc,
		    smp_no_rendevous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#else

/*
 * The function is not called, it is provided to avoid linking failure
 * on a uniprocessor kernel.
 */
static int
test_tsc(void)
{

	return (0);
}

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We can not use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_deepest_sleep >= 2 && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc();
	else if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
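
/*
 * A worked example of the shift computation in init_TSC_tc() above: with
 * the default tsc_shift of 1, max_freq becomes UINT_MAX >> 1 (roughly
 * 2.1 GHz).  On, say, a 2.8 GHz CPU the loop settles on shift == 1, the
 * counter is registered as "TSC-low" at 1.4 GHz, and tc_priv records the
 * shift for use by the read routines and the vdso.
 */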
/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}
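
/*
 * The sysctl handler below also accepts a new frequency from userland,
 * e.g. (with an illustrative value):
 *	sysctl machdep.tsc_freq=2793000000
 * Reads simply report the current calibrated value.
 */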
static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	/*
	 * Shift the 64-bit EDX:EAX timestamp right by tc_priv bits and
	 * return the low 32 bits of the result.
	 */
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (tc == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (tc == &tsc_timecounter);
}
#endif