/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");
TUNABLE_INT("kern.timecounter.tsc_shift", &tsc_shift);

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC",			/* name */
	800,			/* quality (adjusted in code) */
};

#define	VMW_HVMAGIC		0x564d5868
#define	VMW_HVPORT		0x5658
#define	VMW_HVCMD_GETVERSION	10
#define	VMW_HVCMD_GETHZ		45
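
/*
 * The helper below issues the VMware hypervisor ("backdoor") call: the magic
 * number goes into %eax, a command-specific parameter into %ebx (UINT_MAX
 * here), the command number into %ecx, and an "inl" from I/O port VMW_HVPORT
 * is intercepted by the hypervisor, which returns its results in %eax-%edx.
 * The "memory" clobber keeps the compiler from reordering around the call.
 */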
static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

	__asm __volatile("inl %w3, %0"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
	    : "memory");
}

static int
tsc_freq_vmware(void)
{
	char hv_sig[13];
	u_int regs[4];
	char *p;
	u_int hv_high;
	int i;

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	hv_high = 0;
	if ((cpu_feature2 & CPUID2_HV) != 0) {
		do_cpuid(0x40000000, regs);
		hv_high = regs[0];
		for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
			memcpy(p, &regs[i], sizeof(regs[i]));
		*p = '\0';
		if (bootverbose) {
			/*
			 * HV vendor	ID string
			 * ------------+--------------
			 * KVM		"KVMKVMKVM"
			 * Microsoft	"Microsoft Hv"
			 * VMware	"VMwareVMware"
			 * Xen		"XenVMMXenVMM"
			 */
			printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
		}
		if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
			return (0);
	} else {
		p = getenv("smbios.system.serial");
		if (p == NULL)
			return (0);
		if (strncmp(p, "VMware-", 7) != 0 &&
		    strncmp(p, "VMW", 3) != 0) {
			freeenv(p);
			return (0);
		}
		freeenv(p);
		vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
		if (regs[1] != VMW_HVMAGIC)
			return (0);
	}
	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
	return (1);
}

static void
tsc_freq_intel(void)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
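	/*
	 * Example of the parse below: for a brand string ending in
	 * "... @ 2.40GHz", p is backed up to point at "2.40G", p[4] == 'G'
	 * selects a multiplier of 1000, and the digits give
	 * (2 * 1000 + 4 * 100 + 0 * 10) * 1000 * 1000 = 2.4 GHz.
	 */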
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return;
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			tsc_freq = freq;
		}
	}
}

static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
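			/*
			 * MPERF counts at the invariant base frequency and
			 * APERF at the delivered frequency; zeroing both and
			 * requiring that each has advanced after a short
			 * delay is a cheap functional check of the MSRs.
			 */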
			wrmsr(MSR_MPERF, 0);
			wrmsr(MSR_APERF, 0);
			DELAY(10);
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (tsc_freq_vmware())
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	if (bootverbose)
		printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	if (bootverbose)
		printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
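/*
 * TSC_READ(x) below stamps out tsc_read_0(), tsc_read_1() and tsc_read_2(),
 * identical except for which of the three per-CPU slots (tsc[cpu * 3 + x])
 * receives the reading; the rendezvous runs them back to back so that
 * comp_smp_tsc() can compare interleaved readings across CPUs.
 */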
#define	TSC_READ(x)			\
static void				\
tsc_read_##x(void *arg)			\
{					\
	uint64_t *tsc = arg;		\
	u_int cpu = PCPU_GET(cpuid);	\
					\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();	\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}
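
/*
 * test_tsc() below runs the three read phases N times via smp_rendezvous()
 * and then lets every CPU check the interleaving: its second and third
 * readings must be strictly later than another CPU's first and second
 * readings, respectively.  Any non-positive difference marks the TSCs as
 * unusable for SMP timekeeping.
 */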
static int
test_tsc(void)
{
	uint64_t *data, *tsc;
	u_int i, size;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test\n",
		    smp_tsc ? "pass" : "fail");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#else

/*
 * This function is never called; it is provided only to avoid a linking
 * failure on uniprocessor kernels.
 */
static int
test_tsc(void)
{

	return (0);
}

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		if (bootverbose)
			printf("TSC timecounter disabled: APM enabled.\n");
		goto init;
	}

	/*
	 * We cannot use the TSC if it stops incrementing in deep sleep.
	 * Currently only Intel CPUs are known for this problem unless
	 * the invariant TSC bit is set.
	 */
	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_quality = -1000;
		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
		if (bootverbose)
			printf("TSC timecounter disabled: C3 enabled.\n");
		goto init;
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc();
	else if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

init:
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
		tc_init(&tsc_timecounter);
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
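
/*
 * A worked example of the shift selection above: with tsc_freq around
 * 3.4 GHz and the default tsc_shift of 1, max_freq is UINT_MAX >> 1
 * (about 2.1 GHz), so the loop settles on shift = 1 and the timecounter
 * is registered as "TSC-low" running at roughly 1.7 GHz.
 */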

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}
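
/*
 * The shift chosen in init_TSC_tc() is stashed in tc_priv as an
 * integer-cast pointer: the "rdtsc; shrd" sequence above returns bits
 * [shift, shift + 31] of the 64-bit TSC.  The VDSO hooks below export the
 * same shift in th_x86_shift so that the userland fast path can apply the
 * matching transformation to its own RDTSC readings.
 */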
uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

	vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

	vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (timecounter == &tsc_timecounter);
}
#endif