1dd7d207dSJung-uk Kim /*- 2ebf5747bSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3ebf5747bSPedro F. Giffuni * 4dd7d207dSJung-uk Kim * Copyright (c) 1998-2003 Poul-Henning Kamp 5dd7d207dSJung-uk Kim * All rights reserved. 6dd7d207dSJung-uk Kim * 7dd7d207dSJung-uk Kim * Redistribution and use in source and binary forms, with or without 8dd7d207dSJung-uk Kim * modification, are permitted provided that the following conditions 9dd7d207dSJung-uk Kim * are met: 10dd7d207dSJung-uk Kim * 1. Redistributions of source code must retain the above copyright 11dd7d207dSJung-uk Kim * notice, this list of conditions and the following disclaimer. 12dd7d207dSJung-uk Kim * 2. Redistributions in binary form must reproduce the above copyright 13dd7d207dSJung-uk Kim * notice, this list of conditions and the following disclaimer in the 14dd7d207dSJung-uk Kim * documentation and/or other materials provided with the distribution. 15dd7d207dSJung-uk Kim * 16dd7d207dSJung-uk Kim * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17dd7d207dSJung-uk Kim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18dd7d207dSJung-uk Kim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19dd7d207dSJung-uk Kim * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20dd7d207dSJung-uk Kim * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21dd7d207dSJung-uk Kim * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22dd7d207dSJung-uk Kim * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23dd7d207dSJung-uk Kim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24dd7d207dSJung-uk Kim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25dd7d207dSJung-uk Kim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26dd7d207dSJung-uk Kim * SUCH DAMAGE. 27dd7d207dSJung-uk Kim */ 28dd7d207dSJung-uk Kim 29dd7d207dSJung-uk Kim #include <sys/cdefs.h> 30dd7d207dSJung-uk Kim __FBSDID("$FreeBSD$"); 31dd7d207dSJung-uk Kim 32dd7d207dSJung-uk Kim #include "opt_clock.h" 33dd7d207dSJung-uk Kim 34dd7d207dSJung-uk Kim #include <sys/param.h> 35dd7d207dSJung-uk Kim #include <sys/bus.h> 36dd7d207dSJung-uk Kim #include <sys/cpu.h> 375da5812bSJung-uk Kim #include <sys/limits.h> 38dd7d207dSJung-uk Kim #include <sys/malloc.h> 39dd7d207dSJung-uk Kim #include <sys/systm.h> 40dd7d207dSJung-uk Kim #include <sys/sysctl.h> 41dd7d207dSJung-uk Kim #include <sys/time.h> 42dd7d207dSJung-uk Kim #include <sys/timetc.h> 43dd7d207dSJung-uk Kim #include <sys/kernel.h> 44dd7d207dSJung-uk Kim #include <sys/power.h> 45dd7d207dSJung-uk Kim #include <sys/smp.h> 46aea81038SKonstantin Belousov #include <sys/vdso.h> 47dd7d207dSJung-uk Kim #include <machine/clock.h> 48dd7d207dSJung-uk Kim #include <machine/cputypes.h> 49dd7d207dSJung-uk Kim #include <machine/md_var.h> 50dd7d207dSJung-uk Kim #include <machine/specialreg.h> 5101e1933dSJohn Baldwin #include <x86/vmware.h> 5216808549SKonstantin Belousov #include <dev/acpica/acpi_hpet.h> 53dd7d207dSJung-uk Kim 54dd7d207dSJung-uk Kim #include "cpufreq_if.h" 55dd7d207dSJung-uk Kim 56dd7d207dSJung-uk Kim uint64_t tsc_freq; 57dd7d207dSJung-uk Kim int tsc_is_invariant; 58155094d7SJung-uk Kim int tsc_perf_stat; 59155094d7SJung-uk Kim 60dd7d207dSJung-uk Kim static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag; 61dd7d207dSJung-uk Kim 62dd7d207dSJung-uk Kim SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN, 63dd7d207dSJung-uk Kim &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant"); 64dd7d207dSJung-uk Kim 65dd7d207dSJung-uk Kim #ifdef SMP 661472b87fSNeel Natu int smp_tsc; 67dd7d207dSJung-uk Kim SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0, 68dd7d207dSJung-uk Kim "Indicates whether the TSC is safe to use in SMP mode"); 69b2c63698SAlexander Motin 70b2c63698SAlexander Motin int smp_tsc_adjust = 0; 71b2c63698SAlexander Motin SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN, 72b2c63698SAlexander Motin &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP"); 73dd7d207dSJung-uk Kim #endif 74dd7d207dSJung-uk Kim 75e7f1427dSKonstantin Belousov static int tsc_shift = 1; 76e7f1427dSKonstantin Belousov SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN, 77e7f1427dSKonstantin Belousov &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency"); 78e7f1427dSKonstantin Belousov 7979422085SJung-uk Kim static int tsc_disabled; 8079422085SJung-uk Kim SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0, 8179422085SJung-uk Kim "Disable x86 Time Stamp Counter"); 8279422085SJung-uk Kim 83a4e4127fSJung-uk Kim static int tsc_skip_calibration; 84a4e4127fSJung-uk Kim SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN, 85a4e4127fSJung-uk Kim &tsc_skip_calibration, 0, "Disable TSC frequency calibration"); 86a4e4127fSJung-uk Kim 87dd7d207dSJung-uk Kim static void tsc_freq_changed(void *arg, const struct cf_level *level, 88dd7d207dSJung-uk Kim int status); 89dd7d207dSJung-uk Kim static void tsc_freq_changing(void *arg, const struct cf_level *level, 90dd7d207dSJung-uk Kim int *status); 91dd7d207dSJung-uk Kim static unsigned tsc_get_timecount(struct timecounter *tc); 92814124c3SKonstantin Belousov static inline unsigned tsc_get_timecount_low(struct timecounter *tc); 93814124c3SKonstantin Belousov static unsigned tsc_get_timecount_lfence(struct timecounter *tc); 94814124c3SKonstantin Belousov static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc); 95814124c3SKonstantin Belousov static unsigned tsc_get_timecount_mfence(struct timecounter *tc); 96814124c3SKonstantin Belousov static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc); 97dd7d207dSJung-uk Kim static void tsc_levels_changed(void *arg, int unit); 9816808549SKonstantin Belousov static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, 9916808549SKonstantin Belousov struct timecounter *tc); 10016808549SKonstantin Belousov #ifdef COMPAT_FREEBSD32 10116808549SKonstantin Belousov static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 10216808549SKonstantin Belousov struct timecounter *tc); 10316808549SKonstantin Belousov #endif 104dd7d207dSJung-uk Kim 105dd7d207dSJung-uk Kim static struct timecounter tsc_timecounter = { 10616808549SKonstantin Belousov .tc_get_timecount = tsc_get_timecount, 10716808549SKonstantin Belousov .tc_counter_mask = ~0u, 10816808549SKonstantin Belousov .tc_name = "TSC", 10916808549SKonstantin Belousov .tc_quality = 800, /* adjusted in code */ 11016808549SKonstantin Belousov .tc_fill_vdso_timehands = x86_tsc_vdso_timehands, 11116808549SKonstantin Belousov #ifdef COMPAT_FREEBSD32 11216808549SKonstantin Belousov .tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32, 11316808549SKonstantin Belousov #endif 114dd7d207dSJung-uk Kim }; 115dd7d207dSJung-uk Kim 11601e1933dSJohn Baldwin static void 1175da5812bSJung-uk Kim tsc_freq_vmware(void) 1185da5812bSJung-uk Kim { 1195da5812bSJung-uk Kim u_int regs[4]; 1205da5812bSJung-uk Kim 1215da5812bSJung-uk Kim if (hv_high >= 0x40000010) { 1225da5812bSJung-uk Kim do_cpuid(0x40000010, regs); 1235da5812bSJung-uk Kim tsc_freq = regs[0] * 1000; 1245da5812bSJung-uk Kim } else { 1255da5812bSJung-uk Kim vmware_hvcall(VMW_HVCMD_GETHZ, regs); 1265da5812bSJung-uk Kim if (regs[1] != UINT_MAX) 1275da5812bSJung-uk Kim tsc_freq = regs[0] | ((uint64_t)regs[1] << 32); 1285da5812bSJung-uk Kim } 1295da5812bSJung-uk Kim tsc_is_invariant = 1; 1305da5812bSJung-uk Kim } 1315da5812bSJung-uk Kim 132*506a906cSKonstantin Belousov /* 133*506a906cSKonstantin Belousov * Calculate TSC frequency using information from the CPUID leaf 0x15 134*506a906cSKonstantin Belousov * 'Time Stamp Counter and Nominal Core Crystal Clock'. It should be 135*506a906cSKonstantin Belousov * an improvement over the parsing of the CPU model name in 136*506a906cSKonstantin Belousov * tsc_freq_intel(), when available. 137*506a906cSKonstantin Belousov */ 138*506a906cSKonstantin Belousov static bool 139*506a906cSKonstantin Belousov tsc_freq_cpuid(void) 140*506a906cSKonstantin Belousov { 141*506a906cSKonstantin Belousov u_int regs[4]; 142*506a906cSKonstantin Belousov 143*506a906cSKonstantin Belousov if (cpu_high < 0x15) 144*506a906cSKonstantin Belousov return (false); 145*506a906cSKonstantin Belousov do_cpuid(0x15, regs); 146*506a906cSKonstantin Belousov if (regs[0] == 0 || regs[1] == 0 || regs[2] == 0) 147*506a906cSKonstantin Belousov return (false); 148*506a906cSKonstantin Belousov tsc_freq = (uint64_t)regs[2] * regs[1] / regs[0]; 149*506a906cSKonstantin Belousov return (true); 150*506a906cSKonstantin Belousov } 151*506a906cSKonstantin Belousov 152a4e4127fSJung-uk Kim static void 153a4e4127fSJung-uk Kim tsc_freq_intel(void) 154dd7d207dSJung-uk Kim { 155a4e4127fSJung-uk Kim char brand[48]; 156a4e4127fSJung-uk Kim u_int regs[4]; 157a4e4127fSJung-uk Kim uint64_t freq; 158a4e4127fSJung-uk Kim char *p; 159a4e4127fSJung-uk Kim u_int i; 160dd7d207dSJung-uk Kim 161a4e4127fSJung-uk Kim /* 162a4e4127fSJung-uk Kim * Intel Processor Identification and the CPUID Instruction 163a4e4127fSJung-uk Kim * Application Note 485. 164a4e4127fSJung-uk Kim * http://www.intel.com/assets/pdf/appnote/241618.pdf 165a4e4127fSJung-uk Kim */ 166a4e4127fSJung-uk Kim if (cpu_exthigh >= 0x80000004) { 167a4e4127fSJung-uk Kim p = brand; 168a4e4127fSJung-uk Kim for (i = 0x80000002; i < 0x80000005; i++) { 169a4e4127fSJung-uk Kim do_cpuid(i, regs); 170a4e4127fSJung-uk Kim memcpy(p, regs, sizeof(regs)); 171a4e4127fSJung-uk Kim p += sizeof(regs); 172a4e4127fSJung-uk Kim } 173a4e4127fSJung-uk Kim p = NULL; 174a4e4127fSJung-uk Kim for (i = 0; i < sizeof(brand) - 1; i++) 175a4e4127fSJung-uk Kim if (brand[i] == 'H' && brand[i + 1] == 'z') 176a4e4127fSJung-uk Kim p = brand + i; 177a4e4127fSJung-uk Kim if (p != NULL) { 178a4e4127fSJung-uk Kim p -= 5; 179a4e4127fSJung-uk Kim switch (p[4]) { 180a4e4127fSJung-uk Kim case 'M': 181a4e4127fSJung-uk Kim i = 1; 182a4e4127fSJung-uk Kim break; 183a4e4127fSJung-uk Kim case 'G': 184a4e4127fSJung-uk Kim i = 1000; 185a4e4127fSJung-uk Kim break; 186a4e4127fSJung-uk Kim case 'T': 187a4e4127fSJung-uk Kim i = 1000000; 188a4e4127fSJung-uk Kim break; 189a4e4127fSJung-uk Kim default: 190dd7d207dSJung-uk Kim return; 191a4e4127fSJung-uk Kim } 192a4e4127fSJung-uk Kim #define C2D(c) ((c) - '0') 193a4e4127fSJung-uk Kim if (p[1] == '.') { 194a4e4127fSJung-uk Kim freq = C2D(p[0]) * 1000; 195a4e4127fSJung-uk Kim freq += C2D(p[2]) * 100; 196a4e4127fSJung-uk Kim freq += C2D(p[3]) * 10; 197a4e4127fSJung-uk Kim freq *= i * 1000; 198a4e4127fSJung-uk Kim } else { 199a4e4127fSJung-uk Kim freq = C2D(p[0]) * 1000; 200a4e4127fSJung-uk Kim freq += C2D(p[1]) * 100; 201a4e4127fSJung-uk Kim freq += C2D(p[2]) * 10; 202a4e4127fSJung-uk Kim freq += C2D(p[3]); 203a4e4127fSJung-uk Kim freq *= i * 1000000; 204a4e4127fSJung-uk Kim } 205a4e4127fSJung-uk Kim #undef C2D 206a4e4127fSJung-uk Kim tsc_freq = freq; 207a4e4127fSJung-uk Kim } 208a4e4127fSJung-uk Kim } 209a4e4127fSJung-uk Kim } 210dd7d207dSJung-uk Kim 211a4e4127fSJung-uk Kim static void 212a4e4127fSJung-uk Kim probe_tsc_freq(void) 213a4e4127fSJung-uk Kim { 214155094d7SJung-uk Kim u_int regs[4]; 215a4e4127fSJung-uk Kim uint64_t tsc1, tsc2; 216dd7d207dSJung-uk Kim 2175da5812bSJung-uk Kim if (cpu_high >= 6) { 2185da5812bSJung-uk Kim do_cpuid(6, regs); 2195da5812bSJung-uk Kim if ((regs[2] & CPUID_PERF_STAT) != 0) { 2205da5812bSJung-uk Kim /* 2215da5812bSJung-uk Kim * XXX Some emulators expose host CPUID without actual 2225da5812bSJung-uk Kim * support for these MSRs. We must test whether they 2235da5812bSJung-uk Kim * really work. 2245da5812bSJung-uk Kim */ 2255da5812bSJung-uk Kim wrmsr(MSR_MPERF, 0); 2265da5812bSJung-uk Kim wrmsr(MSR_APERF, 0); 2275da5812bSJung-uk Kim DELAY(10); 2285da5812bSJung-uk Kim if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0) 2295da5812bSJung-uk Kim tsc_perf_stat = 1; 2305da5812bSJung-uk Kim } 2315da5812bSJung-uk Kim } 2325da5812bSJung-uk Kim 23301e1933dSJohn Baldwin if (vm_guest == VM_GUEST_VMWARE) { 23401e1933dSJohn Baldwin tsc_freq_vmware(); 2355da5812bSJung-uk Kim return; 23601e1933dSJohn Baldwin } 2375da5812bSJung-uk Kim 238dd7d207dSJung-uk Kim switch (cpu_vendor_id) { 239dd7d207dSJung-uk Kim case CPU_VENDOR_AMD: 240a106a27cSJung-uk Kim if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 241a106a27cSJung-uk Kim (vm_guest == VM_GUEST_NO && 242a106a27cSJung-uk Kim CPUID_TO_FAMILY(cpu_id) >= 0x10)) 243dd7d207dSJung-uk Kim tsc_is_invariant = 1; 244814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 245814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 246814124c3SKonstantin Belousov tsc_get_timecount_mfence; 247814124c3SKonstantin Belousov } 248dd7d207dSJung-uk Kim break; 249dd7d207dSJung-uk Kim case CPU_VENDOR_INTEL: 250a106a27cSJung-uk Kim if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 || 251a106a27cSJung-uk Kim (vm_guest == VM_GUEST_NO && 252a106a27cSJung-uk Kim ((CPUID_TO_FAMILY(cpu_id) == 0x6 && 253dd7d207dSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0xe) || 254dd7d207dSJung-uk Kim (CPUID_TO_FAMILY(cpu_id) == 0xf && 255a106a27cSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0x3)))) 256dd7d207dSJung-uk Kim tsc_is_invariant = 1; 257814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 258814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 259814124c3SKonstantin Belousov tsc_get_timecount_lfence; 260814124c3SKonstantin Belousov } 261dd7d207dSJung-uk Kim break; 262dd7d207dSJung-uk Kim case CPU_VENDOR_CENTAUR: 263a106a27cSJung-uk Kim if (vm_guest == VM_GUEST_NO && 264a106a27cSJung-uk Kim CPUID_TO_FAMILY(cpu_id) == 0x6 && 265dd7d207dSJung-uk Kim CPUID_TO_MODEL(cpu_id) >= 0xf && 266dd7d207dSJung-uk Kim (rdmsr(0x1203) & 0x100000000ULL) == 0) 267dd7d207dSJung-uk Kim tsc_is_invariant = 1; 268814124c3SKonstantin Belousov if (cpu_feature & CPUID_SSE2) { 269814124c3SKonstantin Belousov tsc_timecounter.tc_get_timecount = 270814124c3SKonstantin Belousov tsc_get_timecount_lfence; 271814124c3SKonstantin Belousov } 272dd7d207dSJung-uk Kim break; 273dd7d207dSJung-uk Kim } 274dd7d207dSJung-uk Kim 275a4e4127fSJung-uk Kim if (tsc_skip_calibration) { 276*506a906cSKonstantin Belousov if (tsc_freq_cpuid()) 277*506a906cSKonstantin Belousov ; 278*506a906cSKonstantin Belousov else if (cpu_vendor_id == CPU_VENDOR_INTEL) 279a4e4127fSJung-uk Kim tsc_freq_intel(); 280*506a906cSKonstantin Belousov } else { 281a4e4127fSJung-uk Kim if (bootverbose) 282a4e4127fSJung-uk Kim printf("Calibrating TSC clock ... "); 283a4e4127fSJung-uk Kim tsc1 = rdtsc(); 284a4e4127fSJung-uk Kim DELAY(1000000); 285a4e4127fSJung-uk Kim tsc2 = rdtsc(); 286a4e4127fSJung-uk Kim tsc_freq = tsc2 - tsc1; 287*506a906cSKonstantin Belousov } 288a4e4127fSJung-uk Kim if (bootverbose) 289a4e4127fSJung-uk Kim printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq); 290a4e4127fSJung-uk Kim } 291a4e4127fSJung-uk Kim 292a4e4127fSJung-uk Kim void 293a4e4127fSJung-uk Kim init_TSC(void) 294a4e4127fSJung-uk Kim { 295a4e4127fSJung-uk Kim 296a4e4127fSJung-uk Kim if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 297a4e4127fSJung-uk Kim return; 298a4e4127fSJung-uk Kim 299fe760cfaSJohn Baldwin #ifdef __i386__ 300fe760cfaSJohn Baldwin /* The TSC is known to be broken on certain CPUs. */ 301fe760cfaSJohn Baldwin switch (cpu_vendor_id) { 302fe760cfaSJohn Baldwin case CPU_VENDOR_AMD: 303fe760cfaSJohn Baldwin switch (cpu_id & 0xFF0) { 304fe760cfaSJohn Baldwin case 0x500: 305fe760cfaSJohn Baldwin /* K5 Model 0 */ 306fe760cfaSJohn Baldwin return; 307fe760cfaSJohn Baldwin } 308fe760cfaSJohn Baldwin break; 309fe760cfaSJohn Baldwin case CPU_VENDOR_CENTAUR: 310fe760cfaSJohn Baldwin switch (cpu_id & 0xff0) { 311fe760cfaSJohn Baldwin case 0x540: 312fe760cfaSJohn Baldwin /* 313fe760cfaSJohn Baldwin * http://www.centtech.com/c6_data_sheet.pdf 314fe760cfaSJohn Baldwin * 315fe760cfaSJohn Baldwin * I-12 RDTSC may return incoherent values in EDX:EAX 316fe760cfaSJohn Baldwin * I-13 RDTSC hangs when certain event counters are used 317fe760cfaSJohn Baldwin */ 318fe760cfaSJohn Baldwin return; 319fe760cfaSJohn Baldwin } 320fe760cfaSJohn Baldwin break; 321fe760cfaSJohn Baldwin case CPU_VENDOR_NSC: 322fe760cfaSJohn Baldwin switch (cpu_id & 0xff0) { 323fe760cfaSJohn Baldwin case 0x540: 324fe760cfaSJohn Baldwin if ((cpu_id & CPUID_STEPPING) == 0) 325fe760cfaSJohn Baldwin return; 326fe760cfaSJohn Baldwin break; 327fe760cfaSJohn Baldwin } 328fe760cfaSJohn Baldwin break; 329fe760cfaSJohn Baldwin } 330fe760cfaSJohn Baldwin #endif 331fe760cfaSJohn Baldwin 332a4e4127fSJung-uk Kim probe_tsc_freq(); 333a4e4127fSJung-uk Kim 334dd7d207dSJung-uk Kim /* 335dd7d207dSJung-uk Kim * Inform CPU accounting about our boot-time clock rate. This will 336dd7d207dSJung-uk Kim * be updated if someone loads a cpufreq driver after boot that 337dd7d207dSJung-uk Kim * discovers a new max frequency. 338dd7d207dSJung-uk Kim */ 339a4e4127fSJung-uk Kim if (tsc_freq != 0) 3405ac44f72SJung-uk Kim set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant); 341dd7d207dSJung-uk Kim 342dd7d207dSJung-uk Kim if (tsc_is_invariant) 343dd7d207dSJung-uk Kim return; 344dd7d207dSJung-uk Kim 345dd7d207dSJung-uk Kim /* Register to find out about changes in CPU frequency. */ 346dd7d207dSJung-uk Kim tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change, 347dd7d207dSJung-uk Kim tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST); 348dd7d207dSJung-uk Kim tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change, 349dd7d207dSJung-uk Kim tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST); 350dd7d207dSJung-uk Kim tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed, 351dd7d207dSJung-uk Kim tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY); 352dd7d207dSJung-uk Kim } 353dd7d207dSJung-uk Kim 35465e7d70bSJung-uk Kim #ifdef SMP 35565e7d70bSJung-uk Kim 356814124c3SKonstantin Belousov /* 357814124c3SKonstantin Belousov * RDTSC is not a serializing instruction, and does not drain 358814124c3SKonstantin Belousov * instruction stream, so we need to drain the stream before executing 359814124c3SKonstantin Belousov * it. It could be fixed by use of RDTSCP, except the instruction is 360814124c3SKonstantin Belousov * not available everywhere. 361814124c3SKonstantin Belousov * 362814124c3SKonstantin Belousov * Use CPUID for draining in the boot-time SMP constistency test. The 363814124c3SKonstantin Belousov * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel 364814124c3SKonstantin Belousov * and VIA) when SSE2 is present, and nothing on older machines which 365814124c3SKonstantin Belousov * also do not issue RDTSC prematurely. There, testing for SSE2 and 366e1a18e46SKonstantin Belousov * vendor is too cumbersome, and we learn about TSC presence from CPUID. 367814124c3SKonstantin Belousov * 368814124c3SKonstantin Belousov * Do not use do_cpuid(), since we do not need CPUID results, which 369814124c3SKonstantin Belousov * have to be written into memory with do_cpuid(). 370814124c3SKonstantin Belousov */ 37165e7d70bSJung-uk Kim #define TSC_READ(x) \ 37265e7d70bSJung-uk Kim static void \ 37365e7d70bSJung-uk Kim tsc_read_##x(void *arg) \ 37465e7d70bSJung-uk Kim { \ 3757bfcb3bbSJim Harris uint64_t *tsc = arg; \ 37665e7d70bSJung-uk Kim u_int cpu = PCPU_GET(cpuid); \ 37765e7d70bSJung-uk Kim \ 378814124c3SKonstantin Belousov __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \ 3797bfcb3bbSJim Harris tsc[cpu * 3 + x] = rdtsc(); \ 38065e7d70bSJung-uk Kim } 38165e7d70bSJung-uk Kim TSC_READ(0) 38265e7d70bSJung-uk Kim TSC_READ(1) 38365e7d70bSJung-uk Kim TSC_READ(2) 38465e7d70bSJung-uk Kim #undef TSC_READ 38565e7d70bSJung-uk Kim 38665e7d70bSJung-uk Kim #define N 1000 38765e7d70bSJung-uk Kim 38865e7d70bSJung-uk Kim static void 38965e7d70bSJung-uk Kim comp_smp_tsc(void *arg) 39065e7d70bSJung-uk Kim { 3917bfcb3bbSJim Harris uint64_t *tsc; 3927bfcb3bbSJim Harris int64_t d1, d2; 39365e7d70bSJung-uk Kim u_int cpu = PCPU_GET(cpuid); 39465e7d70bSJung-uk Kim u_int i, j, size; 39565e7d70bSJung-uk Kim 39665e7d70bSJung-uk Kim size = (mp_maxid + 1) * 3; 39765e7d70bSJung-uk Kim for (i = 0, tsc = arg; i < N; i++, tsc += size) 39865e7d70bSJung-uk Kim CPU_FOREACH(j) { 39965e7d70bSJung-uk Kim if (j == cpu) 40065e7d70bSJung-uk Kim continue; 40165e7d70bSJung-uk Kim d1 = tsc[cpu * 3 + 1] - tsc[j * 3]; 40265e7d70bSJung-uk Kim d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1]; 40365e7d70bSJung-uk Kim if (d1 <= 0 || d2 <= 0) { 40465e7d70bSJung-uk Kim smp_tsc = 0; 40565e7d70bSJung-uk Kim return; 40665e7d70bSJung-uk Kim } 40765e7d70bSJung-uk Kim } 40865e7d70bSJung-uk Kim } 40965e7d70bSJung-uk Kim 410b2c63698SAlexander Motin static void 411b2c63698SAlexander Motin adj_smp_tsc(void *arg) 412b2c63698SAlexander Motin { 413b2c63698SAlexander Motin uint64_t *tsc; 414b2c63698SAlexander Motin int64_t d, min, max; 415b2c63698SAlexander Motin u_int cpu = PCPU_GET(cpuid); 416b2c63698SAlexander Motin u_int first, i, size; 417b2c63698SAlexander Motin 418b2c63698SAlexander Motin first = CPU_FIRST(); 419b2c63698SAlexander Motin if (cpu == first) 420b2c63698SAlexander Motin return; 421b2c63698SAlexander Motin min = INT64_MIN; 422b2c63698SAlexander Motin max = INT64_MAX; 423b2c63698SAlexander Motin size = (mp_maxid + 1) * 3; 424b2c63698SAlexander Motin for (i = 0, tsc = arg; i < N; i++, tsc += size) { 425b2c63698SAlexander Motin d = tsc[first * 3] - tsc[cpu * 3 + 1]; 426b2c63698SAlexander Motin if (d > min) 427b2c63698SAlexander Motin min = d; 428b2c63698SAlexander Motin d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2]; 429b2c63698SAlexander Motin if (d > min) 430b2c63698SAlexander Motin min = d; 431b2c63698SAlexander Motin d = tsc[first * 3 + 1] - tsc[cpu * 3]; 432b2c63698SAlexander Motin if (d < max) 433b2c63698SAlexander Motin max = d; 434b2c63698SAlexander Motin d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1]; 435b2c63698SAlexander Motin if (d < max) 436b2c63698SAlexander Motin max = d; 437b2c63698SAlexander Motin } 438b2c63698SAlexander Motin if (min > max) 439b2c63698SAlexander Motin return; 440b2c63698SAlexander Motin d = min / 2 + max / 2; 441b2c63698SAlexander Motin __asm __volatile ( 442b2c63698SAlexander Motin "movl $0x10, %%ecx\n\t" 443b2c63698SAlexander Motin "rdmsr\n\t" 444b2c63698SAlexander Motin "addl %%edi, %%eax\n\t" 445b2c63698SAlexander Motin "adcl %%esi, %%edx\n\t" 446b2c63698SAlexander Motin "wrmsr\n" 447b2c63698SAlexander Motin : /* No output */ 448b2c63698SAlexander Motin : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32)) 449b2c63698SAlexander Motin : "ax", "cx", "dx", "cc" 450b2c63698SAlexander Motin ); 451b2c63698SAlexander Motin } 452b2c63698SAlexander Motin 45365e7d70bSJung-uk Kim static int 454e7f1427dSKonstantin Belousov test_tsc(void) 45565e7d70bSJung-uk Kim { 4567bfcb3bbSJim Harris uint64_t *data, *tsc; 457b2c63698SAlexander Motin u_int i, size, adj; 45865e7d70bSJung-uk Kim 459e7f1427dSKonstantin Belousov if ((!smp_tsc && !tsc_is_invariant) || vm_guest) 46065e7d70bSJung-uk Kim return (-100); 46165e7d70bSJung-uk Kim size = (mp_maxid + 1) * 3; 46265e7d70bSJung-uk Kim data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK); 463b2c63698SAlexander Motin adj = 0; 464b2c63698SAlexander Motin retry: 46565e7d70bSJung-uk Kim for (i = 0, tsc = data; i < N; i++, tsc += size) 46665e7d70bSJung-uk Kim smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc); 46765e7d70bSJung-uk Kim smp_tsc = 1; /* XXX */ 46867d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc, 46967d955aaSPatrick Kelsey smp_no_rendezvous_barrier, data); 470b2c63698SAlexander Motin if (!smp_tsc && adj < smp_tsc_adjust) { 471b2c63698SAlexander Motin adj++; 47267d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc, 47367d955aaSPatrick Kelsey smp_no_rendezvous_barrier, data); 474b2c63698SAlexander Motin goto retry; 475b2c63698SAlexander Motin } 47665e7d70bSJung-uk Kim free(data, M_TEMP); 47765e7d70bSJung-uk Kim if (bootverbose) 478b2c63698SAlexander Motin printf("SMP: %sed TSC synchronization test%s\n", 479b2c63698SAlexander Motin smp_tsc ? "pass" : "fail", 480b2c63698SAlexander Motin adj > 0 ? " after adjustment" : ""); 48126e6537aSJung-uk Kim if (smp_tsc && tsc_is_invariant) { 48226e6537aSJung-uk Kim switch (cpu_vendor_id) { 48326e6537aSJung-uk Kim case CPU_VENDOR_AMD: 48426e6537aSJung-uk Kim /* 48526e6537aSJung-uk Kim * Starting with Family 15h processors, TSC clock 48626e6537aSJung-uk Kim * source is in the north bridge. Check whether 48726e6537aSJung-uk Kim * we have a single-socket/multi-core platform. 48826e6537aSJung-uk Kim * XXX Need more work for complex cases. 48926e6537aSJung-uk Kim */ 49026e6537aSJung-uk Kim if (CPUID_TO_FAMILY(cpu_id) < 0x15 || 49126e6537aSJung-uk Kim (amd_feature2 & AMDID2_CMP) == 0 || 49226e6537aSJung-uk Kim smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1) 49326e6537aSJung-uk Kim break; 49426e6537aSJung-uk Kim return (1000); 49526e6537aSJung-uk Kim case CPU_VENDOR_INTEL: 49626e6537aSJung-uk Kim /* 49726e6537aSJung-uk Kim * XXX Assume Intel platforms have synchronized TSCs. 49826e6537aSJung-uk Kim */ 49926e6537aSJung-uk Kim return (1000); 50026e6537aSJung-uk Kim } 50126e6537aSJung-uk Kim return (800); 50226e6537aSJung-uk Kim } 50326e6537aSJung-uk Kim return (-100); 50465e7d70bSJung-uk Kim } 50565e7d70bSJung-uk Kim 50665e7d70bSJung-uk Kim #undef N 50765e7d70bSJung-uk Kim 508e7f1427dSKonstantin Belousov #else 509e7f1427dSKonstantin Belousov 510e7f1427dSKonstantin Belousov /* 511e7f1427dSKonstantin Belousov * The function is not called, it is provided to avoid linking failure 512e7f1427dSKonstantin Belousov * on uniprocessor kernel. 513e7f1427dSKonstantin Belousov */ 514e7f1427dSKonstantin Belousov static int 515e7f1427dSKonstantin Belousov test_tsc(void) 516e7f1427dSKonstantin Belousov { 517e7f1427dSKonstantin Belousov 518e7f1427dSKonstantin Belousov return (0); 519e7f1427dSKonstantin Belousov } 520e7f1427dSKonstantin Belousov 52165e7d70bSJung-uk Kim #endif /* SMP */ 52265e7d70bSJung-uk Kim 52365e7d70bSJung-uk Kim static void 524dd7d207dSJung-uk Kim init_TSC_tc(void) 525dd7d207dSJung-uk Kim { 52695f2f098SJung-uk Kim uint64_t max_freq; 52795f2f098SJung-uk Kim int shift; 528dd7d207dSJung-uk Kim 52938b8542cSJung-uk Kim if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled) 530dd7d207dSJung-uk Kim return; 531dd7d207dSJung-uk Kim 532dd7d207dSJung-uk Kim /* 53395f2f098SJung-uk Kim * Limit timecounter frequency to fit in an int and prevent it from 53495f2f098SJung-uk Kim * overflowing too fast. 53595f2f098SJung-uk Kim */ 53695f2f098SJung-uk Kim max_freq = UINT_MAX; 53795f2f098SJung-uk Kim 53895f2f098SJung-uk Kim /* 539dd7d207dSJung-uk Kim * We can not use the TSC if we support APM. Precise timekeeping 540dd7d207dSJung-uk Kim * on an APM'ed machine is at best a fools pursuit, since 541dd7d207dSJung-uk Kim * any and all of the time spent in various SMM code can't 542dd7d207dSJung-uk Kim * be reliably accounted for. Reading the RTC is your only 543dd7d207dSJung-uk Kim * source of reliable time info. The i8254 loses too, of course, 544dd7d207dSJung-uk Kim * but we need to have some kind of time... 545dd7d207dSJung-uk Kim * We don't know at this point whether APM is going to be used 546dd7d207dSJung-uk Kim * or not, nor when it might be activated. Play it safe. 547dd7d207dSJung-uk Kim */ 548dd7d207dSJung-uk Kim if (power_pm_get_type() == POWER_PM_TYPE_APM) { 549dd7d207dSJung-uk Kim tsc_timecounter.tc_quality = -1000; 550dd7d207dSJung-uk Kim if (bootverbose) 551dd7d207dSJung-uk Kim printf("TSC timecounter disabled: APM enabled.\n"); 55265e7d70bSJung-uk Kim goto init; 553dd7d207dSJung-uk Kim } 554dd7d207dSJung-uk Kim 555a49399a9SJung-uk Kim /* 55692597e06SJohn Baldwin * Intel CPUs without a C-state invariant TSC can stop the TSC 557d1411416SJohn Baldwin * in either C2 or C3. Disable use of C2 and C3 while using 558d1411416SJohn Baldwin * the TSC as the timecounter. The timecounter can be changed 559d1411416SJohn Baldwin * to enable C2 and C3. 560d1411416SJohn Baldwin * 561d1411416SJohn Baldwin * Note that the TSC is used as the cputicker for computing 562d1411416SJohn Baldwin * thread runtime regardless of the timecounter setting, so 563d1411416SJohn Baldwin * using an alternate timecounter and enabling C2 or C3 can 564d1411416SJohn Baldwin * result incorrect runtimes for kernel idle threads (but not 565d1411416SJohn Baldwin * for any non-idle threads). 566a49399a9SJung-uk Kim */ 5678cd59625SKonstantin Belousov if (cpu_vendor_id == CPU_VENDOR_INTEL && 568a49399a9SJung-uk Kim (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) { 56992597e06SJohn Baldwin tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP; 570a49399a9SJung-uk Kim if (bootverbose) 571d1411416SJohn Baldwin printf("TSC timecounter disables C2 and C3.\n"); 572a49399a9SJung-uk Kim } 573a49399a9SJung-uk Kim 574dd7d207dSJung-uk Kim /* 575e7f1427dSKonstantin Belousov * We can not use the TSC in SMP mode unless the TSCs on all CPUs 576e7f1427dSKonstantin Belousov * are synchronized. If the user is sure that the system has 577e7f1427dSKonstantin Belousov * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a 578e7f1427dSKonstantin Belousov * non-zero value. The TSC seems unreliable in virtualized SMP 5795cf8ac1bSMike Silbersack * environments, so it is set to a negative quality in those cases. 580dd7d207dSJung-uk Kim */ 581e7f1427dSKonstantin Belousov if (mp_ncpus > 1) 582e7f1427dSKonstantin Belousov tsc_timecounter.tc_quality = test_tsc(); 583e7f1427dSKonstantin Belousov else if (tsc_is_invariant) 58426e6537aSJung-uk Kim tsc_timecounter.tc_quality = 1000; 585e7f1427dSKonstantin Belousov max_freq >>= tsc_shift; 58626e6537aSJung-uk Kim 58765e7d70bSJung-uk Kim init: 588e7f1427dSKonstantin Belousov for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++) 58995f2f098SJung-uk Kim ; 590e7f1427dSKonstantin Belousov if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) { 591814124c3SKonstantin Belousov if (cpu_vendor_id == CPU_VENDOR_AMD) { 592e7f1427dSKonstantin Belousov tsc_timecounter.tc_get_timecount = shift > 0 ? 593e7f1427dSKonstantin Belousov tsc_get_timecount_low_mfence : 594e7f1427dSKonstantin Belousov tsc_get_timecount_mfence; 595814124c3SKonstantin Belousov } else { 596e7f1427dSKonstantin Belousov tsc_timecounter.tc_get_timecount = shift > 0 ? 597e7f1427dSKonstantin Belousov tsc_get_timecount_low_lfence : 598e7f1427dSKonstantin Belousov tsc_get_timecount_lfence; 599814124c3SKonstantin Belousov } 600e7f1427dSKonstantin Belousov } else { 601e7f1427dSKonstantin Belousov tsc_timecounter.tc_get_timecount = shift > 0 ? 602e7f1427dSKonstantin Belousov tsc_get_timecount_low : tsc_get_timecount; 603e7f1427dSKonstantin Belousov } 604e7f1427dSKonstantin Belousov if (shift > 0) { 60595f2f098SJung-uk Kim tsc_timecounter.tc_name = "TSC-low"; 60695f2f098SJung-uk Kim if (bootverbose) 607bc8e4ad2SJung-uk Kim printf("TSC timecounter discards lower %d bit(s)\n", 60895f2f098SJung-uk Kim shift); 60995f2f098SJung-uk Kim } 610bc34c87eSJung-uk Kim if (tsc_freq != 0) { 61195f2f098SJung-uk Kim tsc_timecounter.tc_frequency = tsc_freq >> shift; 61295f2f098SJung-uk Kim tsc_timecounter.tc_priv = (void *)(intptr_t)shift; 613dd7d207dSJung-uk Kim tc_init(&tsc_timecounter); 614dd7d207dSJung-uk Kim } 615dd7d207dSJung-uk Kim } 61665e7d70bSJung-uk Kim SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL); 617dd7d207dSJung-uk Kim 618dd7d207dSJung-uk Kim /* 619dd7d207dSJung-uk Kim * When cpufreq levels change, find out about the (new) max frequency. We 620dd7d207dSJung-uk Kim * use this to update CPU accounting in case it got a lower estimate at boot. 621dd7d207dSJung-uk Kim */ 622dd7d207dSJung-uk Kim static void 623dd7d207dSJung-uk Kim tsc_levels_changed(void *arg, int unit) 624dd7d207dSJung-uk Kim { 625dd7d207dSJung-uk Kim device_t cf_dev; 626dd7d207dSJung-uk Kim struct cf_level *levels; 627dd7d207dSJung-uk Kim int count, error; 628dd7d207dSJung-uk Kim uint64_t max_freq; 629dd7d207dSJung-uk Kim 630dd7d207dSJung-uk Kim /* Only use values from the first CPU, assuming all are equal. */ 631dd7d207dSJung-uk Kim if (unit != 0) 632dd7d207dSJung-uk Kim return; 633dd7d207dSJung-uk Kim 634dd7d207dSJung-uk Kim /* Find the appropriate cpufreq device instance. */ 635dd7d207dSJung-uk Kim cf_dev = devclass_get_device(devclass_find("cpufreq"), unit); 636dd7d207dSJung-uk Kim if (cf_dev == NULL) { 637dd7d207dSJung-uk Kim printf("tsc_levels_changed() called but no cpufreq device?\n"); 638dd7d207dSJung-uk Kim return; 639dd7d207dSJung-uk Kim } 640dd7d207dSJung-uk Kim 641dd7d207dSJung-uk Kim /* Get settings from the device and find the max frequency. */ 642dd7d207dSJung-uk Kim count = 64; 643dd7d207dSJung-uk Kim levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT); 644dd7d207dSJung-uk Kim if (levels == NULL) 645dd7d207dSJung-uk Kim return; 646dd7d207dSJung-uk Kim error = CPUFREQ_LEVELS(cf_dev, levels, &count); 647dd7d207dSJung-uk Kim if (error == 0 && count != 0) { 648dd7d207dSJung-uk Kim max_freq = (uint64_t)levels[0].total_set.freq * 1000000; 649dd7d207dSJung-uk Kim set_cputicker(rdtsc, max_freq, 1); 650dd7d207dSJung-uk Kim } else 651dd7d207dSJung-uk Kim printf("tsc_levels_changed: no max freq found\n"); 652dd7d207dSJung-uk Kim free(levels, M_TEMP); 653dd7d207dSJung-uk Kim } 654dd7d207dSJung-uk Kim 655dd7d207dSJung-uk Kim /* 656dd7d207dSJung-uk Kim * If the TSC timecounter is in use, veto the pending change. It may be 657dd7d207dSJung-uk Kim * possible in the future to handle a dynamically-changing timecounter rate. 658dd7d207dSJung-uk Kim */ 659dd7d207dSJung-uk Kim static void 660dd7d207dSJung-uk Kim tsc_freq_changing(void *arg, const struct cf_level *level, int *status) 661dd7d207dSJung-uk Kim { 662dd7d207dSJung-uk Kim 663dd7d207dSJung-uk Kim if (*status != 0 || timecounter != &tsc_timecounter) 664dd7d207dSJung-uk Kim return; 665dd7d207dSJung-uk Kim 666dd7d207dSJung-uk Kim printf("timecounter TSC must not be in use when " 667dd7d207dSJung-uk Kim "changing frequencies; change denied\n"); 668dd7d207dSJung-uk Kim *status = EBUSY; 669dd7d207dSJung-uk Kim } 670dd7d207dSJung-uk Kim 671dd7d207dSJung-uk Kim /* Update TSC freq with the value indicated by the caller. */ 672dd7d207dSJung-uk Kim static void 673dd7d207dSJung-uk Kim tsc_freq_changed(void *arg, const struct cf_level *level, int status) 674dd7d207dSJung-uk Kim { 6753453537fSJung-uk Kim uint64_t freq; 676dd7d207dSJung-uk Kim 677dd7d207dSJung-uk Kim /* If there was an error during the transition, don't do anything. */ 67879422085SJung-uk Kim if (tsc_disabled || status != 0) 679dd7d207dSJung-uk Kim return; 680dd7d207dSJung-uk Kim 681dd7d207dSJung-uk Kim /* Total setting for this level gives the new frequency in MHz. */ 6823453537fSJung-uk Kim freq = (uint64_t)level->total_set.freq * 1000000; 6833453537fSJung-uk Kim atomic_store_rel_64(&tsc_freq, freq); 68495f2f098SJung-uk Kim tsc_timecounter.tc_frequency = 68595f2f098SJung-uk Kim freq >> (int)(intptr_t)tsc_timecounter.tc_priv; 686dd7d207dSJung-uk Kim } 687dd7d207dSJung-uk Kim 688dd7d207dSJung-uk Kim static int 689dd7d207dSJung-uk Kim sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS) 690dd7d207dSJung-uk Kim { 691dd7d207dSJung-uk Kim int error; 692dd7d207dSJung-uk Kim uint64_t freq; 693dd7d207dSJung-uk Kim 6943453537fSJung-uk Kim freq = atomic_load_acq_64(&tsc_freq); 6953453537fSJung-uk Kim if (freq == 0) 696dd7d207dSJung-uk Kim return (EOPNOTSUPP); 697cbc134adSMatthew D Fleming error = sysctl_handle_64(oidp, &freq, 0, req); 6987ebbcb21SJung-uk Kim if (error == 0 && req->newptr != NULL) { 6993453537fSJung-uk Kim atomic_store_rel_64(&tsc_freq, freq); 700bc8e4ad2SJung-uk Kim atomic_store_rel_64(&tsc_timecounter.tc_frequency, 701bc8e4ad2SJung-uk Kim freq >> (int)(intptr_t)tsc_timecounter.tc_priv); 7027ebbcb21SJung-uk Kim } 703dd7d207dSJung-uk Kim return (error); 704dd7d207dSJung-uk Kim } 705dd7d207dSJung-uk Kim 706cbc134adSMatthew D Fleming SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW, 7075331d61dSJung-uk Kim 0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency"); 708dd7d207dSJung-uk Kim 709727c7b2dSJung-uk Kim static u_int 71095f2f098SJung-uk Kim tsc_get_timecount(struct timecounter *tc __unused) 711dd7d207dSJung-uk Kim { 712727c7b2dSJung-uk Kim 713727c7b2dSJung-uk Kim return (rdtsc32()); 714dd7d207dSJung-uk Kim } 71595f2f098SJung-uk Kim 716814124c3SKonstantin Belousov static inline u_int 717bc8e4ad2SJung-uk Kim tsc_get_timecount_low(struct timecounter *tc) 71895f2f098SJung-uk Kim { 7195df88f46SJung-uk Kim uint32_t rv; 72095f2f098SJung-uk Kim 7215df88f46SJung-uk Kim __asm __volatile("rdtsc; shrd %%cl, %%edx, %0" 7225df88f46SJung-uk Kim : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx"); 7235df88f46SJung-uk Kim return (rv); 72495f2f098SJung-uk Kim } 725aea81038SKonstantin Belousov 726814124c3SKonstantin Belousov static u_int 727814124c3SKonstantin Belousov tsc_get_timecount_lfence(struct timecounter *tc __unused) 728814124c3SKonstantin Belousov { 729814124c3SKonstantin Belousov 730814124c3SKonstantin Belousov lfence(); 731814124c3SKonstantin Belousov return (rdtsc32()); 732814124c3SKonstantin Belousov } 733814124c3SKonstantin Belousov 734814124c3SKonstantin Belousov static u_int 735814124c3SKonstantin Belousov tsc_get_timecount_low_lfence(struct timecounter *tc) 736814124c3SKonstantin Belousov { 737814124c3SKonstantin Belousov 738814124c3SKonstantin Belousov lfence(); 739814124c3SKonstantin Belousov return (tsc_get_timecount_low(tc)); 740814124c3SKonstantin Belousov } 741814124c3SKonstantin Belousov 742814124c3SKonstantin Belousov static u_int 743814124c3SKonstantin Belousov tsc_get_timecount_mfence(struct timecounter *tc __unused) 744814124c3SKonstantin Belousov { 745814124c3SKonstantin Belousov 746814124c3SKonstantin Belousov mfence(); 747814124c3SKonstantin Belousov return (rdtsc32()); 748814124c3SKonstantin Belousov } 749814124c3SKonstantin Belousov 750814124c3SKonstantin Belousov static u_int 751814124c3SKonstantin Belousov tsc_get_timecount_low_mfence(struct timecounter *tc) 752814124c3SKonstantin Belousov { 753814124c3SKonstantin Belousov 754814124c3SKonstantin Belousov mfence(); 755814124c3SKonstantin Belousov return (tsc_get_timecount_low(tc)); 756814124c3SKonstantin Belousov } 757814124c3SKonstantin Belousov 75816808549SKonstantin Belousov static uint32_t 75916808549SKonstantin Belousov x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) 760aea81038SKonstantin Belousov { 761aea81038SKonstantin Belousov 76216808549SKonstantin Belousov vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC; 763d1b1b600SNeel Natu vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv; 76416808549SKonstantin Belousov vdso_th->th_x86_hpet_idx = 0xffffffff; 765aea81038SKonstantin Belousov bzero(vdso_th->th_res, sizeof(vdso_th->th_res)); 76616808549SKonstantin Belousov return (1); 767aea81038SKonstantin Belousov } 768aea81038SKonstantin Belousov 769aea81038SKonstantin Belousov #ifdef COMPAT_FREEBSD32 77016808549SKonstantin Belousov static uint32_t 77116808549SKonstantin Belousov x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32, 772d1b1b600SNeel Natu struct timecounter *tc) 773aea81038SKonstantin Belousov { 774aea81038SKonstantin Belousov 77516808549SKonstantin Belousov vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC; 776d1b1b600SNeel Natu vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv; 77716808549SKonstantin Belousov vdso_th32->th_x86_hpet_idx = 0xffffffff; 778aea81038SKonstantin Belousov bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res)); 77916808549SKonstantin Belousov return (1); 780aea81038SKonstantin Belousov } 781aea81038SKonstantin Belousov #endif 782