/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
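
/*
 * tsc_timecounter is registered with tc_init() once the TSC has been probed.
 * Its tc_get_timecount method is switched at run time among the variants
 * declared above: plain RDTSC, RDTSC preceded by LFENCE or MFENCE to keep it
 * from being executed speculatively, and "low" versions that right-shift the
 * counter when its frequency has to be limited (see init_TSC_tc()).
 */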
static struct timecounter tsc_timecounter = {
        tsc_get_timecount,      /* get_timecount */
        0,                      /* no poll_pps */
        ~0u,                    /* counter_mask */
        0,                      /* frequency */
        "TSC",                  /* name */
        800,                    /* quality (adjusted in code) */
};

#define VMW_HVMAGIC             0x564d5868
#define VMW_HVPORT              0x5658
#define VMW_HVCMD_GETVERSION    10
#define VMW_HVCMD_GETHZ         45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

        __asm __volatile("inl %w3, %0"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
            : "memory");
}

static int
tsc_freq_vmware(void)
{
        char hv_sig[13];
        u_int regs[4];
        char *p;
        u_int hv_high;
        int i;

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        hv_high = 0;
        if ((cpu_feature2 & CPUID2_HV) != 0) {
                do_cpuid(0x40000000, regs);
                hv_high = regs[0];
                for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
                        memcpy(p, &regs[i], sizeof(regs[i]));
                *p = '\0';
                if (bootverbose) {
                        /*
                         * HV vendor    ID string
                         * ------------+--------------
                         * KVM          "KVMKVMKVM"
                         * Microsoft    "Microsoft Hv"
                         * VMware       "VMwareVMware"
                         * Xen          "XenVMMXenVMM"
                         */
                        printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
                }
                if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
                        return (0);
        } else {
                p = getenv("smbios.system.serial");
                if (p == NULL)
                        return (0);
                if (strncmp(p, "VMware-", 7) != 0 &&
                    strncmp(p, "VMW", 3) != 0) {
                        freeenv(p);
                        return (0);
                }
                freeenv(p);
                vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
                if (regs[1] != VMW_HVMAGIC)
                        return (0);
        }
        if (hv_high >= 0x40000010) {
                do_cpuid(0x40000010, regs);
                tsc_freq = regs[0] * 1000;
        } else {
                vmware_hvcall(VMW_HVCMD_GETHZ, regs);
                if (regs[1] != UINT_MAX)
                        tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
        }
        tsc_is_invariant = 1;
        return (1);
}

static void
tsc_freq_intel(void)
{
        char brand[48];
        u_int regs[4];
        uint64_t freq;
        char *p;
        u_int i;

        /*
         * Intel Processor Identification and the CPUID Instruction
         * Application Note 485.
         * http://www.intel.com/assets/pdf/appnote/241618.pdf
         */
        if (cpu_exthigh >= 0x80000004) {
                p = brand;
                for (i = 0x80000002; i < 0x80000005; i++) {
                        do_cpuid(i, regs);
                        memcpy(p, regs, sizeof(regs));
                        p += sizeof(regs);
                }
                p = NULL;
                for (i = 0; i < sizeof(brand) - 1; i++)
                        if (brand[i] == 'H' && brand[i + 1] == 'z')
                                p = brand + i;
                if (p != NULL) {
                        p -= 5;
                        switch (p[4]) {
                        case 'M':
                                i = 1;
                                break;
                        case 'G':
                                i = 1000;
                                break;
                        case 'T':
                                i = 1000000;
                                break;
                        default:
                                return;
                        }
#define C2D(c)  ((c) - '0')
                        if (p[1] == '.') {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[2]) * 100;
                                freq += C2D(p[3]) * 10;
                                freq *= i * 1000;
                        } else {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[1]) * 100;
                                freq += C2D(p[2]) * 10;
                                freq += C2D(p[3]);
                                freq *= i * 1000000;
                        }
#undef C2D
                        tsc_freq = freq;
                }
        }
}

static void
probe_tsc_freq(void)
{
        u_int regs[4];
        uint64_t tsc1, tsc2;

        if (cpu_high >= 6) {
                do_cpuid(6, regs);
                if ((regs[2] & CPUID_PERF_STAT) != 0) {
                        /*
                         * XXX Some emulators expose host CPUID without actual
                         * support for these MSRs.  We must test whether they
                         * really work.
                         */
                        wrmsr(MSR_MPERF, 0);
                        wrmsr(MSR_APERF, 0);
                        DELAY(10);
                        if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
                                tsc_perf_stat = 1;
                }
        }

        if (tsc_freq_vmware())
                return;

        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) >= 0x10))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_mfence;
                }
                break;
        case CPU_VENDOR_INTEL:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
                    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
                    CPUID_TO_MODEL(cpu_id) >= 0x3))))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        case CPU_VENDOR_CENTAUR:
                if (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xf &&
                    (rdmsr(0x1203) & 0x100000000ULL) == 0)
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        }

        if (tsc_skip_calibration) {
                if (cpu_vendor_id == CPU_VENDOR_INTEL)
                        tsc_freq_intel();
                return;
        }

        if (bootverbose)
                printf("Calibrating TSC clock ... ");
        tsc1 = rdtsc();
        DELAY(1000000);
        tsc2 = rdtsc();
        tsc_freq = tsc2 - tsc1;
        if (bootverbose)
                printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}
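
/*
 * Determine at boot whether the TSC is usable: bail out if the CPU lacks a
 * TSC or it was disabled with the machdep.disable_tsc tunable, probe the
 * counter frequency, hand the result to the cputicker, and, when the TSC is
 * not invariant, register cpufreq event handlers so that later frequency
 * changes can be tracked.
 */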
void
init_TSC(void)
{

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        probe_tsc_freq();

        /*
         * Inform CPU accounting about our boot-time clock rate.  This will
         * be updated if someone loads a cpufreq driver after boot that
         * discovers a new max frequency.
         */
        if (tsc_freq != 0)
                set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

        if (tsc_is_invariant)
                return;

        /* Register to find out about changes in CPU frequency. */
        tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
            tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
            tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
            tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except that instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define TSC_READ(x)                                                     \
static void                                                             \
tsc_read_##x(void *arg)                                                 \
{                                                                       \
        uint32_t *tsc = arg;                                            \
        u_int cpu = PCPU_GET(cpuid);                                    \
                                                                        \
        __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");     \
        tsc[cpu * 3 + x] = rdtsc32();                                   \
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define N       1000

static void
comp_smp_tsc(void *arg)
{
        uint32_t *tsc;
        int32_t d1, d2;
        u_int cpu = PCPU_GET(cpuid);
        u_int i, j, size;

        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size)
                CPU_FOREACH(j) {
                        if (j == cpu)
                                continue;
                        d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
                        d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
                        if (d1 <= 0 || d2 <= 0) {
                                smp_tsc = 0;
                                return;
                        }
                }
}

static int
test_smp_tsc(void)
{
        uint32_t *data, *tsc;
        u_int i, size;

        if (!smp_tsc && !tsc_is_invariant)
                return (-100);
        size = (mp_maxid + 1) * 3;
        data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
        for (i = 0, tsc = data; i < N; i++, tsc += size)
                smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
        smp_tsc = 1;    /* XXX */
        smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
            smp_no_rendevous_barrier, data);
        free(data, M_TEMP);
        if (bootverbose)
                printf("SMP: %sed TSC synchronization test\n",
                    smp_tsc ? "pass" : "fail");
        if (smp_tsc && tsc_is_invariant) {
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
                        /*
                         * Starting with Family 15h processors, TSC clock
                         * source is in the north bridge.  Check whether
                         * we have a single-socket/multi-core platform.
                         * XXX Need more work for complex cases.
                         */
                        if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
                            (amd_feature2 & AMDID2_CMP) == 0 ||
                            smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
                                break;
                        return (1000);
                case CPU_VENDOR_INTEL:
                        /*
                         * XXX Assume Intel platforms have synchronized TSCs.
                         */
                        return (1000);
                }
                return (800);
        }
        return (-100);
}

#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
        uint64_t max_freq;
        int shift;

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        /*
         * Limit timecounter frequency to fit in an int and prevent it from
         * overflowing too fast.
         */
        max_freq = UINT_MAX;

        /*
         * We cannot use the TSC if we support APM.  Precise timekeeping
         * on an APM'ed machine is at best a fool's pursuit, since
         * any and all of the time spent in various SMM code can't
         * be reliably accounted for.  Reading the RTC is your only
         * source of reliable time info.  The i8254 loses too, of course,
         * but we need to have some kind of time...
         * We don't know at this point whether APM is going to be used
         * or not, nor when it might be activated.  Play it safe.
         */
        if (power_pm_get_type() == POWER_PM_TYPE_APM) {
                tsc_timecounter.tc_quality = -1000;
                if (bootverbose)
                        printf("TSC timecounter disabled: APM enabled.\n");
                goto init;
        }

        /*
         * We cannot use the TSC if it stops incrementing in deep sleep.
         * Currently only Intel CPUs are known for this problem unless
         * the invariant TSC bit is set.
         */
        if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
            (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
                tsc_timecounter.tc_quality = -1000;
                tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
                if (bootverbose)
                        printf("TSC timecounter disabled: C3 enabled.\n");
                goto init;
        }

#ifdef SMP
        /*
         * We cannot use the TSC in SMP mode unless the TSCs on all CPUs are
         * synchronized.  If the user is sure that the system has synchronized
         * TSCs, set the kern.timecounter.smp_tsc tunable to a non-zero value.
         * We also limit the frequency even lower to avoid "temporal anomalies"
         * as much as possible.  The TSC seems unreliable in virtualized SMP
         * environments, so it is set to a negative quality in those cases.
         */
        if (smp_cpus > 1) {
                if (vm_guest != 0) {
                        tsc_timecounter.tc_quality = -100;
                } else {
                        tsc_timecounter.tc_quality = test_smp_tsc();
                        max_freq >>= 8;
                }
        } else
#endif
        if (tsc_is_invariant)
                tsc_timecounter.tc_quality = 1000;

init:
        for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
                ;
        if (shift > 0) {
                if (cpu_feature & CPUID_SSE2) {
                        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_mfence;
                        } else {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_lfence;
                        }
                } else
                        tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
                tsc_timecounter.tc_name = "TSC-low";
                if (bootverbose)
                        printf("TSC timecounter discards lower %d bit(s)\n",
                            shift);
        }
        if (tsc_freq != 0) {
                tsc_timecounter.tc_frequency = tsc_freq >> shift;
                tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
                tc_init(&tsc_timecounter);
        }
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
        device_t cf_dev;
        struct cf_level *levels;
        int count, error;
        uint64_t max_freq;

        /* Only use values from the first CPU, assuming all are equal. */
        if (unit != 0)
                return;

        /* Find the appropriate cpufreq device instance. */
        cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
        if (cf_dev == NULL) {
                printf("tsc_levels_changed() called but no cpufreq device?\n");
                return;
        }

        /* Get settings from the device and find the max frequency. */
        count = 64;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return;
        error = CPUFREQ_LEVELS(cf_dev, levels, &count);
        if (error == 0 && count != 0) {
                max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
                set_cputicker(rdtsc, max_freq, 1);
        } else
                printf("tsc_levels_changed: no max freq found\n");
        free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

        if (*status != 0 || timecounter != &tsc_timecounter)
                return;

        printf("timecounter TSC must not be in use when "
            "changing frequencies; change denied\n");
        *status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
        uint64_t freq;

        /* If there was an error during the transition, don't do anything. */
        if (tsc_disabled || status != 0)
                return;

        /* Total setting for this level gives the new frequency in MHz. */
        freq = (uint64_t)level->total_set.freq * 1000000;
        atomic_store_rel_64(&tsc_freq, freq);
        tsc_timecounter.tc_frequency =
            freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint64_t freq;

        freq = atomic_load_acq_64(&tsc_freq);
        if (freq == 0)
                return (EOPNOTSUPP);
        error = sysctl_handle_64(oidp, &freq, 0, req);
        if (error == 0 && req->newptr != NULL) {
                atomic_store_rel_64(&tsc_freq, freq);
                atomic_store_rel_64(&tsc_timecounter.tc_frequency,
                    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

        return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
        uint32_t rv;

        __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
            : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
        return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

        lfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

        lfence();
        return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

        mfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

        mfence();
        return (tsc_get_timecount_low(tc));
}
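
/*
 * Export the TSC-low shift count (stored in tc_priv) to the vdso timehands
 * so that the userspace fast time functions can apply the same right shift
 * to their own RDTSC readings.  The non-zero return value tells the caller
 * that the TSC timecounter is currently in use.
 */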
uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

        vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
        return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

        vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
        return (timecounter == &tsc_timecounter);
}
#endif