xref: /freebsd/sys/x86/x86/tsc.c (revision 5aa839c9e2c373275091b8bf529c1311d0b84d76)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;
static int	tsc_early_calib_exact;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0,
    "Disable early TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static u_int tsc_get_timecount(struct timecounter *tc);
static inline u_int tsc_get_timecount_low(struct timecounter *tc);
static u_int tsc_get_timecount_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_lfence(struct timecounter *tc);
static u_int tsc_get_timecount_mfence(struct timecounter *tc);
static u_int tsc_get_timecount_low_mfence(struct timecounter *tc);
static u_int tscp_get_timecount(struct timecounter *tc);
static u_int tscp_get_timecount_low(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif

static struct timecounter tsc_timecounter = {
	.tc_get_timecount =		tsc_get_timecount,
	.tc_counter_mask =		~0u,
	.tc_name =			"TSC",
	.tc_quality =			800,	/* adjusted in code */
	.tc_fill_vdso_timehands = 	x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = 	x86_tsc_vdso_timehands32,
#endif
};
static bool
tsc_freq_cpuid_vm(void)
{
	u_int regs[4];

	if (vm_guest == VM_GUEST_NO)
		return (false);
	if (hv_high < 0x40000010)
		return (false);
	do_cpuid(0x40000010, regs);
	tsc_freq = (uint64_t)(regs[0]) * 1000;
	tsc_early_calib_exact = 1;
	return (true);
}

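/*
 * For example (hypothetical values): a hypervisor that reports 2496000 in
 * %eax of leaf 0x40000010 is advertising the TSC frequency in kHz, so the
 * conversion above yields tsc_freq = 2496000 * 1000 = 2.496 GHz.  Because
 * the hypervisor states the frequency outright, the result is exact and
 * late calibration is skipped.
 */
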
static void
tsc_freq_vmware(void)
{
	u_int regs[4];

	vmware_hvcall(VMW_HVCMD_GETHZ, regs);
	if (regs[1] != UINT_MAX)
		tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	tsc_early_calib_exact = 1;
}

/*
 * Calculate TSC frequency using information from the CPUID leaf 0x15 'Time
 * Stamp Counter and Nominal Core Crystal Clock'.  If leaf 0x15 is not
 * functional, as it is on Skylake/Kabylake, try 0x16 'Processor Frequency
 * Information'.  Leaf 0x16 is described in the SDM as informational only, but
 * we can use this value until late calibration is complete.
 */
static bool
tsc_freq_cpuid(uint64_t *res)
{
	u_int regs[4];

	if (cpu_high < 0x15)
		return (false);
	do_cpuid(0x15, regs);
	if (regs[0] != 0 && regs[1] != 0 && regs[2] != 0) {
		*res = (uint64_t)regs[2] * regs[1] / regs[0];
		return (true);
	}

	if (cpu_high < 0x16)
		return (false);
	do_cpuid(0x16, regs);
	if (regs[0] != 0) {
		*res = (uint64_t)regs[0] * 1000000;
		return (true);
	}

	return (false);
}

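/*
 * Worked example (hypothetical values): leaf 0x15 returns the core crystal
 * clock frequency in Hz in %ecx and the TSC/crystal ratio as %ebx/%eax.  A
 * part reporting %eax = 2, %ebx = 146 and %ecx = 24000000 (a 24 MHz
 * crystal) would give
 *
 *	*res = 24000000 * 146 / 2 = 1752000000 Hz (1.752 GHz).
 *
 * Leaf 0x16 instead reports the processor base frequency in MHz in %eax,
 * hence the multiplication by 1000000 above.
 */
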
static bool
tsc_freq_intel_brand(uint64_t *res)
{
	char brand[48];
	u_int regs[4];
	uint64_t freq;
	char *p;
	u_int i;

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		p = brand;
		for (i = 0x80000002; i < 0x80000005; i++) {
			do_cpuid(i, regs);
			memcpy(p, regs, sizeof(regs));
			p += sizeof(regs);
		}
		p = NULL;
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
				p = brand + i;
		if (p != NULL) {
			p -= 5;
			switch (p[4]) {
			case 'M':
				i = 1;
				break;
			case 'G':
				i = 1000;
				break;
			case 'T':
				i = 1000000;
				break;
			default:
				return (false);
			}
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
				freq *= i * 1000;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
				freq += C2D(p[3]);
				freq *= i * 1000000;
			}
#undef C2D
			*res = freq;
			return (true);
		}
	}
	return (false);
}

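/*
 * Worked example (hypothetical brand string): for a string ending in
 * "... @ 2.40GHz", p is left pointing at the five characters "2.40G"
 * preceding "Hz".  The 'G' selects i = 1000, and the p[1] == '.' branch
 * computes
 *
 *	freq = (2 * 1000 + 4 * 100 + 0 * 10) * 1000 * 1000 = 2400000000 Hz.
 *
 * This is the nominal frequency only; it is consulted solely when early
 * calibration has been disabled via the tunable.
 */
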
static void
tsc_freq_8254(uint64_t *res)
{
	uint64_t tsc1, tsc2;
	int64_t overhead;
	int count, i;

	/*
	 * Measure the DELAY() call overhead; the first, cache-cold
	 * iteration is discarded.
	 */
	overhead = 0;
	for (i = 0, count = 8; i < count; i++) {
		tsc1 = rdtsc_ordered();
		DELAY(0);
		tsc2 = rdtsc_ordered();
		if (i > 0)
			overhead += tsc2 - tsc1;
	}
	overhead /= count - 1;

	tsc1 = rdtsc_ordered();
	DELAY(100000);
	tsc2 = rdtsc_ordered();
	*res = (tsc2 - tsc1 - overhead) * 10;
}

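/*
 * The arithmetic above: DELAY(100000) busy-waits 100 ms against the 8254
 * PIT, so the TSC delta (less the measured DELAY() overhead) is the number
 * of TSC ticks per 100 ms; multiplying by 10 scales that to ticks per
 * second.  E.g. (hypothetically), a delta of 250000000 ticks over the
 * interval implies a 2.5 GHz TSC.
 */
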
static void
probe_tsc_freq(void)
{
#ifdef __i386__
	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		switch (cpu_id & 0xFF0) {
		case 0x500:
			/* K5 Model 0 */
			tsc_disabled = 1;
			return;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
		case 0x540:
			/*
			 * http://www.centtech.com/c6_data_sheet.pdf
			 *
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
			 */
			tsc_disabled = 1;
			return;
		}
		break;
	case CPU_VENDOR_NSC:
		switch (cpu_id & 0xff0) {
		case 0x540:
			if ((cpu_id & CPUID_STEPPING) == 0) {
				tsc_disabled = 1;
				return;
			}
			break;
		}
		break;
	}
#endif

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_freq_cpuid_vm()) {
		if (bootverbose)
			printf(
		    "Early TSC frequency %juHz derived from hypervisor CPUID\n",
			    (uintmax_t)tsc_freq);
	} else if (vm_guest == VM_GUEST_VMWARE) {
		tsc_freq_vmware();
		if (bootverbose)
			printf(
		    "Early TSC frequency %juHz derived from VMware hypercall\n",
			    (uintmax_t)tsc_freq);
	} else if (tsc_freq_cpuid(&tsc_freq)) {
		/*
		 * If possible, use the value obtained from CPUID as the initial
		 * frequency.  This will be refined later during boot but is
		 * good enough for now.  The 8254 PIT is not functional on some
		 * newer platforms anyway, so don't delay our boot for what
		 * might be a garbage result.  Late calibration is required if
		 * the initial frequency was obtained from CPUID.16H, as the
		 * derived value may be off by as much as 1%.
		 */
		if (bootverbose)
			printf("Early TSC frequency %juHz derived from CPUID\n",
			    (uintmax_t)tsc_freq);
	} else if (tsc_skip_calibration) {
		/*
		 * Try to parse the brand string to obtain the nominal TSC
		 * frequency.
		 */
		if (cpu_vendor_id == CPU_VENDOR_INTEL &&
		    tsc_freq_intel_brand(&tsc_freq)) {
			if (bootverbose)
				printf(
		    "Early TSC frequency %juHz derived from brand string\n",
				    (uintmax_t)tsc_freq);
		} else {
			tsc_disabled = 1;
		}
	} else {
		/*
		 * Calibrate against the 8254 PIT.  This estimate will be
		 * refined later in tsc_calib().
		 */
		tsc_freq_8254(&tsc_freq);
		if (bootverbose)
			printf(
		    "Early TSC frequency %juHz calibrated from 8254 PIT\n",
			    (uintmax_t)tsc_freq);
	}

	if (cpu_power_ecx & CPUID_PERF_STAT) {
		/*
		 * XXX Some emulators expose host CPUID without actual support
		 * for these MSRs.  We must test whether they really work.
		 */
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		DELAY(10);
		if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
			tsc_perf_stat = 1;
	}
}

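/*
 * When tsc_perf_stat is set, the MPERF/APERF MSR pair can be used to derive
 * the average effective clock rate over an interval.  A minimal sketch of
 * the standard formula (illustration only, not code from this file; the
 * MSR names are defined in <machine/specialreg.h>):
 *
 *	mperf0 = rdmsr(MSR_MPERF); aperf0 = rdmsr(MSR_APERF);
 *	DELAY(interval);
 *	effective_hz = tsc_freq * (rdmsr(MSR_APERF) - aperf0) /
 *	    (rdmsr(MSR_MPERF) - mperf0);
 *
 * MPERF ticks at the invariant TSC rate while APERF ticks at the actual
 * rate, so their ratio reflects frequency scaling.
 */
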
void
start_TSC(void)
{
	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 *
	 * The frequency may also be updated after late calibration is complete;
	 * however, we register the TSC as the ticker now to avoid switching
	 * counters after much of the kernel has already booted and potentially
	 * sampled the CPU clock.
	 */
	if (tsc_freq != 0)
		set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from
 * CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint64_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc();					\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

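/*
 * The macro above expands to three functions, tsc_read_0(), tsc_read_1()
 * and tsc_read_2(), each storing a serialized TSC sample into slot 0, 1 or
 * 2 of the calling CPU's row in a flat array laid out as
 *
 *	tsc[cpu * 3 + sample]
 *
 * for each of (mp_maxid + 1) CPUs.  They are passed to smp_rendezvous() as
 * the setup, action and teardown callbacks respectively, so the rendezvous
 * barriers guarantee that every CPU finishes sample x before any CPU takes
 * sample x + 1.
 */
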
#define	N	1000

static void
comp_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d1, d2;
	u_int cpu = PCPU_GET(cpuid);
	u_int i, j, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
				smp_tsc = 0;
				return;
			}
		}
}

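/*
 * The check above relies on the phase ordering of the rendezvous: sample 1
 * on any CPU is taken after sample 0 on every CPU, and sample 2 after every
 * sample 1.  If the TSCs are synchronized, each CPU must therefore read a
 * larger value in sample 1 than any other CPU read in sample 0 (d1 > 0),
 * and similarly for d2.  For example (hypothetical ticks), if CPU j read
 * 1000 in sample 0 but this CPU reads only 990 in sample 1, d1 = -10 and
 * the test fails: this CPU's TSC lags behind.
 */
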
static void
adj_smp_tsc(void *arg)
{
	uint64_t *tsc;
	int64_t d, min, max;
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	first = CPU_FIRST();
	if (cpu == first)
		return;
	min = INT64_MIN;
	max = INT64_MAX;
	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		if (d > min)
			min = d;
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		if (d < max)
			max = d;
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
		if (d < max)
			max = d;
	}
	if (min > max)
		return;
	d = min / 2 + max / 2;
	__asm __volatile (
		"movl $0x10, %%ecx\n\t"
		"rdmsr\n\t"
		"addl %%edi, %%eax\n\t"
		"adcl %%esi, %%edx\n\t"
		"wrmsr\n"
		: /* No output */
		: "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
		: "ax", "cx", "dx", "cc"
	);
}

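/*
 * Each ordered sample pair above yields a one-sided bound on the offset of
 * the boot processor's TSC relative to this CPU's: "min" accumulates the
 * largest lower bound and "max" the smallest upper bound, so [min, max] is
 * the interval consistent with all N rounds.  The inline assembly then adds
 * the interval's midpoint to MSR 0x10 (the TSC itself) as a 64-bit
 * read-modify-write: rdmsr returns the value in %edx:%eax, the addl/adcl
 * pair adds the low and high halves of d with carry, and wrmsr writes the
 * sum back.
 */
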
static int
test_tsc(int adj_max_count)
{
	uint64_t *data, *tsc;
	u_int i, size, adj;

	if (!smp_tsc && !tsc_is_invariant)
		return (-100);
	/*
	 * Misbehavior of TSC under VirtualBox has been observed.  In
	 * particular, threads doing small (~1 second) sleeps may miss their
	 * wakeup and hang around in sleep state, causing hangs on shutdown.
	 */
	if (vm_guest == VM_GUEST_VBOX)
		return (0);

	TSENTER();
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		adj++;
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	if (bootverbose)
		printf("SMP: %sed TSC synchronization test%s\n",
		    smp_tsc ? "pass" : "fail",
		    adj > 0 ? " after adjustment" : "");
	TSEXIT();
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
		case CPU_VENDOR_HYGON:
			/*
			 * Processor Programming Reference (PPR) for AMD
			 * Family 17h states that the TSC uses a common
			 * reference for all sockets, cores and threads.
			 */
			if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
				return (1000);
			/*
			 * Starting with Family 15h processors, TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
		return (800);
	}
	return (-100);
}

#undef N

#endif /* SMP */

static void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
		if (bootverbose)
			printf("TSC timecounter disables C2 and C3.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
#ifdef SMP
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	else
#endif /* SMP */
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;

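	/*
	 * Worked example (hypothetical frequency): with the default
	 * tsc_shift of 1, max_freq is UINT_MAX >> 1, roughly 2.1 GHz.
	 * A 3.5 GHz TSC then gets shift = 1, and the timecounter
	 * ("TSC-low" below) advances at 1.75 GHz, halving the rate at
	 * which the 32-bit counter wraps.
	 */
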
	/*
	 * Timecounter implementation selection, top to bottom:
	 * - If RDTSCP is available, use RDTSCP.
	 * - If fence instructions are provided (SSE2), use LFENCE;RDTSC
	 *   on Intel, and MFENCE;RDTSC on AMD.
	 * - For really old CPUs, just use RDTSC.
	 */
	if ((amd_feature & AMDID_RDTSCP) != 0) {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tscp_get_timecount_low : tscp_get_timecount;
	} else if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD ||
		    cpu_vendor_id == CPU_VENDOR_HYGON) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		if (bootverbose)
			printf("TSC timecounter discards lower %d bit(s)\n",
			    shift);
	}
	if (tsc_freq != 0) {
		tsc_timecounter.tc_frequency = tsc_freq >> shift;
		tsc_timecounter.tc_priv = (void *)(intptr_t)shift;

		/*
		 * Timecounter registration is deferred until after late
		 * calibration is finished.
		 */
	}
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

static void
tsc_update_freq(uint64_t new_freq)
{
	atomic_store_rel_64(&tsc_freq, new_freq);
	atomic_store_rel_64(&tsc_timecounter.tc_frequency,
	    new_freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
}

void
tsc_init(void)
{
	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();
}

/*
 * Perform late calibration of the TSC frequency once ACPI-based timecounters
 * are available.  At this point timehands are not set up, so we read the
 * highest-quality timecounter directly rather than using (s)binuptime().
 */
void
tsc_calibrate(void)
{
	uint64_t freq;

	if (tsc_disabled)
		return;
	if (tsc_early_calib_exact)
		goto calibrated;

	fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX);
	freq = clockcalib(rdtsc_ordered, "TSC");
	fpu_kern_leave(curthread, NULL);
	tsc_update_freq(freq);

calibrated:
	tc_init(&tsc_timecounter);
	set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);
}

void
resume_TSC(void)
{
#ifdef SMP
	int quality;

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
		return;
	/* Nothing to do with UP. */
	if (mp_ncpus < 2)
		return;

	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
	}
#endif /* SMP */
}

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	device_t cf_dev;
	struct cf_level *levels;
	int count, error;
	uint64_t max_freq;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	count = 64;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return;
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	tsc_update_freq(freq);
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint64_t freq;

	freq = atomic_load_acq_64(&tsc_freq);
	if (freq == 0)
		return (EOPNOTSUPP);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL)
		tsc_update_freq(freq);
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq,
    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_machdep_tsc_freq, "QU",
    "Time Stamp Counter frequency");

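/*
 * Usage sketch from userland (values hypothetical): the OID is read/write,
 * so the measured frequency can be inspected or overridden by the
 * super-user:
 *
 *	# sysctl machdep.tsc_freq
 *	machdep.tsc_freq: 2496000000
 *	# sysctl machdep.tsc_freq=2496123456
 *
 * A write goes through tsc_update_freq(), which also rescales the
 * timecounter frequency by the configured shift.
 */
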
static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

	return (rdtsc32());
}

static u_int
tscp_get_timecount(struct timecounter *tc __unused)
{

	return (rdtscp32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
	return (rv);
}

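/*
 * In the SHRD above, RDTSC leaves the 64-bit count in %edx:%eax and the
 * shift (stashed in tc_priv) is loaded into %ecx, so the instruction
 * returns bits [shift, shift + 31] of the counter.  For example, with
 * shift = 1 a raw count of 0x100000003 reads as 0x80000001.
 */
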
static u_int
tscp_get_timecount_low(struct timecounter *tc)
{
	uint32_t rv;

	__asm __volatile("rdtscp; movl %1, %%ecx; shrd %%cl, %%edx, %0"
	    : "=&a" (rv) : "m" (tc->tc_priv) : "ecx", "edx");
	return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

	lfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

	lfence();
	return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

	mfence();
	return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

	mfence();
	return (tsc_get_timecount_low(tc));
}

static uint32_t
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (1);
}

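/*
 * Returning 1 tells the timekeeping code that userland may compute time
 * without entering the kernel.  A minimal sketch of what the libc side
 * (lib/libc/x86/sys/__vdso_gettc.c) effectively does with these fields,
 * simplified here for illustration:
 *
 *	if (th->th_algo == VDSO_TH_ALGO_X86_TSC)
 *		tc = (u_int)(rdtsc() >> th->th_x86_shift);
 *
 * i.e. it mirrors tsc_get_timecount_low() in userspace, using the shift
 * exported via th_x86_shift.
 */
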
#ifdef COMPAT_FREEBSD32
static uint32_t
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{

	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	vdso_th32->th_x86_pvc_last_systime = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (1);
}
#endif