/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);
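
/*
 * Example (illustrative, not part of this file): all four knobs above are
 * CTLFLAG_RDTUN tunables, so they are normally set from loader.conf(5)
 * before boot rather than changed at run time, e.g.:
 *
 *	kern.timecounter.invariant_tsc=1	# assert P-state invariance
 *	kern.timecounter.smp_tsc=1		# trust SMP TSC synchronization
 *	machdep.disable_tsc=1			# do not use the TSC at all
 *	machdep.disable_tsc_calibration=1	# skip boot-time calibration
 */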
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC",			/* name */
	800,			/* quality (adjusted in code) */
};

#define	VMW_HVMAGIC		0x564d5868
#define	VMW_HVPORT		0x5658
#define	VMW_HVCMD_GETVERSION	10
#define	VMW_HVCMD_GETHZ		45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

	__asm __volatile("inl %w3, %0"
	: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	: "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
	: "memory");
}

static int
tsc_freq_vmware(void)
{
	char hv_sig[13];
	u_int regs[4];
	char *p;
	u_int hv_high;
	int i;

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	hv_high = 0;
	if ((cpu_feature2 & CPUID2_HV) != 0) {
		do_cpuid(0x40000000, regs);
		hv_high = regs[0];
		for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
			memcpy(p, &regs[i], sizeof(regs[i]));
		*p = '\0';
		if (bootverbose) {
			/*
			 * HV vendor	ID string
			 * ------------+--------------
			 * KVM		"KVMKVMKVM"
			 * Microsoft	"Microsoft Hv"
			 * VMware	"VMwareVMware"
			 * Xen		"XenVMMXenVMM"
			 */
			printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
		}
		if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
			return (0);
	} else {
		p = getenv("smbios.system.serial");
		if (p == NULL)
			return (0);
		if (strncmp(p, "VMware-", 7) != 0 &&
		    strncmp(p, "VMW", 3) != 0) {
			freeenv(p);
			return (0);
		}
		freeenv(p);
		vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
		if (regs[1] != VMW_HVMAGIC)
			return (0);
	}
	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
	return (1);
}
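
/*
 * Illustrative sketch (assumed stand-alone user-space code, not part of
 * this file): the same hypervisor-signature probe as in tsc_freq_vmware()
 * above, using the GCC/Clang <cpuid.h> __cpuid() macro.  A real probe
 * should first check the CPUID2_HV feature bit, as the kernel code does.
 */
#if 0
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int a, b, c, d;
	char sig[13];

	/* Leaf 0x40000000: EBX:ECX:EDX carry the vendor signature. */
	__cpuid(0x40000000, a, b, c, d);
	memcpy(sig, &b, 4);
	memcpy(sig + 4, &c, 4);
	memcpy(sig + 8, &d, 4);
	sig[12] = '\0';
	printf("Hypervisor: Origin = \"%s\"\n", sig);
	return (strncmp(sig, "VMwareVMware", 12) == 0 ? 0 : 1);
}
#endif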
static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}

static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (tsc_freq_vmware())
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}
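
/*
 * Illustrative sketch (not used by the code above): the same calibration
 * idea over a shorter window.  probe_tsc_freq() prefers a full second so
 * that the tick delta is the frequency in Hz with no scaling; a shorter
 * window boots faster but multiplies any DELAY() error by the scale
 * factor.  The function name is hypothetical.
 */
#if 0
static uint64_t
tsc_calibrate_fast(void)
{
	uint64_t tsc1, tsc2;

	tsc1 = rdtsc();
	DELAY(100000);			/* 100,000 us = 1/10 second */
	tsc2 = rdtsc();
	return ((tsc2 - tsc1) * 10);	/* scale the 0.1 s delta to Hz */
}
#endif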
void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

static int
test_smp_tsc(void)
{
	uint64_t *data, *tsc;
	u_int i, size;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test\n",
		    smp_tsc ? "pass" : "fail");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */
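
/*
 * Worked example for the test above (illustrative numbers): with CPUs A
 * and B, one rendezvous round yields the per-CPU triples
 *
 *	A: 100, 205, 310	B: 102, 207, 312
 *
 * Checking A against B, d1 = 205 - 102 = 103 and d2 = 310 - 207 = 103;
 * both are positive, so each of A's later reads is ordered after B's
 * earlier ones.  A non-positive delta would mean one TSC trails the
 * other by more than a rendezvous round, and smp_tsc is cleared.
 */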
static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * We cannot use the TSC if it stops incrementing in deep sleep.
	 * Currently only Intel CPUs are known for this problem unless
	 * the invariant TSC bit is set.
	 */
	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_quality = -1000;
		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
		if (bootverbose)
			printf("TSC timecounter disabled: C3 enabled.\n");
		goto init;
	}

#ifdef SMP
	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to
	 * a non-zero value.  We also limit the frequency even lower to
	 * avoid "temporal anomalies" as much as possible.  The TSC seems
	 * unreliable in virtualized SMP environments, so it is set to a
	 * negative quality in those cases.
	 */
	if (smp_cpus > 1) {
		if (vm_guest != 0) {
			tsc_timecounter.tc_quality = -100;
		} else {
			tsc_timecounter.tc_quality = test_smp_tsc();
			max_freq >>= 8;
		}
	} else
#endif
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;

init:
	for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if (shift > 0) {
		if (cpu_feature & CPUID_SSE2) {
			if (cpu_vendor_id == CPU_VENDOR_AMD) {
				tsc_timecounter.tc_get_timecount =
				    tsc_get_timecount_low_mfence;
			} else {
				tsc_timecounter.tc_get_timecount =
				    tsc_get_timecount_low_lfence;
			}
		} else
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_low;
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
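
/*
 * Worked example for the shift above (illustrative numbers): on an SMP
 * system max_freq becomes UINT_MAX >> 8, about 16.8 MHz.  A 2.8 GHz TSC
 * then needs shift = 8, since 2.8e9 >> 8 is roughly 10.9 MHz; the
 * timecounter is registered as "TSC-low" at that reduced frequency, and
 * the readers below apply the same shift to raw RDTSC values.
 */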
/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

	vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (timecounter == &tsc_timecounter);
}
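
/*
 * Illustrative note on tsc_get_timecount_low() above: for the shifts the
 * init code generates (0 < shift < 31), the "rdtsc; shrd" sequence is
 * equivalent to the plain C expression
 *
 *	(u_int)(rdtsc() >> (int)(intptr_t)tc->tc_priv)
 *
 * but the SHRD form extracts bits shift..shift+31 directly from
 * EDX:EAX without materializing the full 64-bit shift result, which
 * matters on i386.
 */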
#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

	vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (timecounter == &tsc_timecounter);
}
#endif
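
/*
 * Example (illustrative values): the machdep.tsc_freq handler above can
 * be queried or, on a miscalibrated system, overridden from userland
 * with sysctl(8):
 *
 *	$ sysctl machdep.tsc_freq
 *	machdep.tsc_freq: 2793000000
 *	# sysctl machdep.tsc_freq=2792999000
 */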