/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

#ifdef SMP
int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
#endif

static int tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
        tsc_get_timecount,      /* get_timecount */
        0,                      /* no poll_pps */
        ~0u,                    /* counter_mask */
        0,                      /* frequency */
        "TSC",                  /* name */
        800,                    /* quality (adjusted in code) */
};

#define VMW_HVMAGIC             0x564d5868
#define VMW_HVPORT              0x5658
#define VMW_HVCMD_GETVERSION    10
#define VMW_HVCMD_GETHZ         45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

        __asm __volatile("inl %w3, %0"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
            : "memory");
}

static int
tsc_freq_vmware(void)
{
        char hv_sig[13];
        u_int regs[4];
        char *p;
        u_int hv_high;
        int i;

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        hv_high = 0;
        if ((cpu_feature2 & CPUID2_HV) != 0) {
                do_cpuid(0x40000000, regs);
                hv_high = regs[0];
                for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
                        memcpy(p, &regs[i], sizeof(regs[i]));
                *p = '\0';
                if (bootverbose) {
                        /*
                         * HV vendor      ID string
                         * ------------+--------------
                         * KVM            "KVMKVMKVM"
                         * Microsoft      "Microsoft Hv"
                         * VMware         "VMwareVMware"
                         * Xen            "XenVMMXenVMM"
                         */
                        printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
                }
                if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
                        return (0);
        } else {
                p = getenv("smbios.system.serial");
                if (p == NULL)
                        return (0);
                if (strncmp(p, "VMware-", 7) != 0 &&
                    strncmp(p, "VMW", 3) != 0) {
                        freeenv(p);
                        return (0);
                }
                freeenv(p);
                vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
                if (regs[1] != VMW_HVMAGIC)
                        return (0);
        }
        if (hv_high >= 0x40000010) {
                do_cpuid(0x40000010, regs);
                tsc_freq = regs[0] * 1000;
        } else {
                vmware_hvcall(VMW_HVCMD_GETHZ, regs);
                if (regs[1] != UINT_MAX)
                        tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
        }
        tsc_is_invariant = 1;
        return (1);
}

static void
tsc_freq_intel(void)
{
        char brand[48];
        u_int regs[4];
        uint64_t freq;
        char *p;
        u_int i;

        /*
         * Intel Processor Identification and the CPUID Instruction
         * Application Note 485.
         * http://www.intel.com/assets/pdf/appnote/241618.pdf
         */
        if (cpu_exthigh >= 0x80000004) {
                p = brand;
                for (i = 0x80000002; i < 0x80000005; i++) {
                        do_cpuid(i, regs);
                        memcpy(p, regs, sizeof(regs));
                        p += sizeof(regs);
                }
                p = NULL;
                for (i = 0; i < sizeof(brand) - 1; i++)
                        if (brand[i] == 'H' && brand[i + 1] == 'z')
                                p = brand + i;
                if (p != NULL) {
                        p -= 5;
                        switch (p[4]) {
                        case 'M':
                                i = 1;
                                break;
                        case 'G':
                                i = 1000;
                                break;
                        case 'T':
                                i = 1000000;
                                break;
                        default:
                                return;
                        }
#define C2D(c)  ((c) - '0')
                        if (p[1] == '.') {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[2]) * 100;
                                freq += C2D(p[3]) * 10;
                                freq *= i * 1000;
                        } else {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[1]) * 100;
                                freq += C2D(p[2]) * 10;
                                freq += C2D(p[3]);
                                freq *= i * 1000000;
                        }
#undef C2D
                        tsc_freq = freq;
                }
        }
}

static void
probe_tsc_freq(void)
{
        u_int regs[4];
        uint64_t tsc1, tsc2;

        if (cpu_high >= 6) {
                do_cpuid(6, regs);
                if ((regs[2] & CPUID_PERF_STAT) != 0) {
                        /*
                         * XXX Some emulators expose host CPUID without actual
                         * support for these MSRs.  We must test whether they
                         * really work.
                         */
                        wrmsr(MSR_MPERF, 0);
                        wrmsr(MSR_APERF, 0);
                        DELAY(10);
                        if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
                                tsc_perf_stat = 1;
                }
        }

        if (tsc_freq_vmware())
                return;

        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) >= 0x10))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_mfence;
                }
                break;
        case CPU_VENDOR_INTEL:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
                    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
                    CPUID_TO_MODEL(cpu_id) >= 0x3))))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        case CPU_VENDOR_CENTAUR:
                if (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xf &&
                    (rdmsr(0x1203) & 0x100000000ULL) == 0)
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        }

        if (tsc_skip_calibration) {
                if (cpu_vendor_id == CPU_VENDOR_INTEL)
                        tsc_freq_intel();
                return;
        }

        if (bootverbose)
                printf("Calibrating TSC clock ... ");
        tsc1 = rdtsc();
        DELAY(1000000);
        tsc2 = rdtsc();
        tsc_freq = tsc2 - tsc1;
        if (bootverbose)
                printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}

void
init_TSC(void)
{

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

#ifdef __i386__
        /* The TSC is known to be broken on certain CPUs. */
        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                switch (cpu_id & 0xFF0) {
                case 0x500:
                        /* K5 Model 0 */
                        return;
                }
                break;
        case CPU_VENDOR_CENTAUR:
                switch (cpu_id & 0xff0) {
                case 0x540:
                        /*
                         * http://www.centtech.com/c6_data_sheet.pdf
                         *
                         * I-12 RDTSC may return incoherent values in EDX:EAX
                         * I-13 RDTSC hangs when certain event counters are used
                         */
                        return;
                }
                break;
        case CPU_VENDOR_NSC:
                switch (cpu_id & 0xff0) {
                case 0x540:
                        if ((cpu_id & CPUID_STEPPING) == 0)
                                return;
                        break;
                }
                break;
        }
#endif

        probe_tsc_freq();

        /*
         * Inform CPU accounting about our boot-time clock rate.  This will
         * be updated if someone loads a cpufreq driver after boot that
         * discovers a new max frequency.
         */
        if (tsc_freq != 0)
                set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

        if (tsc_is_invariant)
                return;

        /* Register to find out about changes in CPU frequency. */
        tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
            tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
            tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
            tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define TSC_READ(x)                                                     \
static void                                                             \
tsc_read_##x(void *arg)                                                 \
{                                                                       \
        uint64_t *tsc = arg;                                            \
        u_int cpu = PCPU_GET(cpuid);                                    \
                                                                        \
        __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");    \
        tsc[cpu * 3 + x] = rdtsc();                                     \
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define N       1000

static void
comp_smp_tsc(void *arg)
{
        uint64_t *tsc;
        int64_t d1, d2;
        u_int cpu = PCPU_GET(cpuid);
        u_int i, j, size;

        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size)
                CPU_FOREACH(j) {
                        if (j == cpu)
                                continue;
                        d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
                        d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
                        if (d1 <= 0 || d2 <= 0) {
                                smp_tsc = 0;
                                return;
                        }
                }
}

static void
adj_smp_tsc(void *arg)
{
        uint64_t *tsc;
        int64_t d, min, max;
        u_int cpu = PCPU_GET(cpuid);
        u_int first, i, size;

        first = CPU_FIRST();
        if (cpu == first)
                return;
        min = INT64_MIN;
        max = INT64_MAX;
        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size) {
                d = tsc[first * 3] - tsc[cpu * 3 + 1];
                if (d > min)
                        min = d;
                d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
                if (d > min)
                        min = d;
                d = tsc[first * 3 + 1] - tsc[cpu * 3];
                if (d < max)
                        max = d;
                d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
                if (d < max)
                        max = d;
        }
        if (min > max)
                return;
        d = min / 2 + max / 2;
        /* Apply the computed adjustment to this CPU's TSC (MSR 0x10). */
        __asm __volatile (
                "movl $0x10, %%ecx\n\t"
                "rdmsr\n\t"
                "addl %%edi, %%eax\n\t"
                "adcl %%esi, %%edx\n\t"
                "wrmsr\n"
                : /* No output */
                : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
                : "ax", "cx", "dx", "cc"
        );
}

static int
test_tsc(void)
{
        uint64_t *data, *tsc;
        u_int i, size, adj;

        if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
                return (-100);
        size = (mp_maxid + 1) * 3;
        data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
        adj = 0;
retry:
        for (i = 0, tsc = data; i < N; i++, tsc += size)
                smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
        smp_tsc = 1;    /* XXX */
        smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
            smp_no_rendevous_barrier, data);
        if (!smp_tsc && adj < smp_tsc_adjust) {
                adj++;
                smp_rendezvous(smp_no_rendevous_barrier, adj_smp_tsc,
                    smp_no_rendevous_barrier, data);
                goto retry;
        }
        free(data, M_TEMP);
        if (bootverbose)
                printf("SMP: %sed TSC synchronization test%s\n",
                    smp_tsc ? "pass" : "fail",
                    adj > 0 ? " after adjustment" : "");
        if (smp_tsc && tsc_is_invariant) {
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
                        /*
                         * Starting with Family 15h processors, TSC clock
                         * source is in the north bridge.  Check whether
                         * we have a single-socket/multi-core platform.
                         * XXX Need more work for complex cases.
                         */
                        if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
                            (amd_feature2 & AMDID2_CMP) == 0 ||
                            smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
                                break;
                        return (1000);
                case CPU_VENDOR_INTEL:
                        /*
                         * XXX Assume Intel platforms have synchronized TSCs.
                         */
                        return (1000);
                }
                return (800);
        }
        return (-100);
}

#undef N

#else

/*
 * The function is not called; it is provided to avoid a linking failure
 * on a uniprocessor kernel.
 */
static int
test_tsc(void)
{

        return (0);
}

#endif /* SMP */

static void
init_TSC_tc(void)
{
        uint64_t max_freq;
        int shift;

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        /*
         * Limit timecounter frequency to fit in an int and prevent it from
         * overflowing too fast.
         */
        max_freq = UINT_MAX;

        /*
         * We can not use the TSC if we support APM.  Precise timekeeping
         * on an APM'ed machine is at best a fool's pursuit, since
         * any and all of the time spent in various SMM code can't
         * be reliably accounted for.  Reading the RTC is your only
         * source of reliable time info.  The i8254 loses too, of course,
         * but we need to have some kind of time...
         * We don't know at this point whether APM is going to be used
         * or not, nor when it might be activated.  Play it safe.
         */
        if (power_pm_get_type() == POWER_PM_TYPE_APM) {
                tsc_timecounter.tc_quality = -1000;
                if (bootverbose)
                        printf("TSC timecounter disabled: APM enabled.\n");
                goto init;
        }

        /*
         * We cannot use the TSC if it stops incrementing in deep sleep.
         * Currently, only Intel CPUs are known to have this problem unless
         * the invariant TSC bit is set.
         */
        if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
            (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
                tsc_timecounter.tc_quality = -1000;
                tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
                if (bootverbose)
                        printf("TSC timecounter disabled: C3 enabled.\n");
                goto init;
        }

        /*
         * We can not use the TSC in SMP mode unless the TSCs on all CPUs
         * are synchronized.  If the user is sure that the system has
         * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
         * non-zero value.  The TSC seems unreliable in virtualized SMP
         * environments, so it is set to a negative quality in those cases.
         */
        if (mp_ncpus > 1)
                tsc_timecounter.tc_quality = test_tsc();
        else if (tsc_is_invariant)
                tsc_timecounter.tc_quality = 1000;
        max_freq >>= tsc_shift;

init:
        for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
                ;
        if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
                if (cpu_vendor_id == CPU_VENDOR_AMD) {
                        tsc_timecounter.tc_get_timecount = shift > 0 ?
                            tsc_get_timecount_low_mfence :
                            tsc_get_timecount_mfence;
                } else {
                        tsc_timecounter.tc_get_timecount = shift > 0 ?
                            tsc_get_timecount_low_lfence :
                            tsc_get_timecount_lfence;
                }
        } else {
                tsc_timecounter.tc_get_timecount = shift > 0 ?
                    tsc_get_timecount_low : tsc_get_timecount;
        }
        if (shift > 0) {
                tsc_timecounter.tc_name = "TSC-low";
                if (bootverbose)
                        printf("TSC timecounter discards lower %d bit(s)\n",
                            shift);
        }
        if (tsc_freq != 0) {
                tsc_timecounter.tc_frequency = tsc_freq >> shift;
                tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
                tc_init(&tsc_timecounter);
        }
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
        device_t cf_dev;
        struct cf_level *levels;
        int count, error;
        uint64_t max_freq;

        /* Only use values from the first CPU, assuming all are equal. */
        if (unit != 0)
                return;

        /* Find the appropriate cpufreq device instance. */
        cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
        if (cf_dev == NULL) {
                printf("tsc_levels_changed() called but no cpufreq device?\n");
                return;
        }

        /* Get settings from the device and find the max frequency. */
        count = 64;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return;
        error = CPUFREQ_LEVELS(cf_dev, levels, &count);
        if (error == 0 && count != 0) {
                max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
                set_cputicker(rdtsc, max_freq, 1);
        } else
                printf("tsc_levels_changed: no max freq found\n");
        free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

        if (*status != 0 || timecounter != &tsc_timecounter)
                return;

        printf("timecounter TSC must not be in use when "
            "changing frequencies; change denied\n");
        *status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
        uint64_t freq;

        /* If there was an error during the transition, don't do anything. */
        if (tsc_disabled || status != 0)
                return;

        /* Total setting for this level gives the new frequency in MHz. */
        freq = (uint64_t)level->total_set.freq * 1000000;
        atomic_store_rel_64(&tsc_freq, freq);
        tsc_timecounter.tc_frequency =
            freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint64_t freq;

        freq = atomic_load_acq_64(&tsc_freq);
        if (freq == 0)
                return (EOPNOTSUPP);
        error = sysctl_handle_64(oidp, &freq, 0, req);
        if (error == 0 && req->newptr != NULL) {
                atomic_store_rel_64(&tsc_freq, freq);
                atomic_store_rel_64(&tsc_timecounter.tc_frequency,
                    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

        return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
        uint32_t rv;

        __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
            : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
        return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

        lfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

        lfence();
        return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

        mfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

        mfence();
        return (tsc_get_timecount_low(tc));
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

        vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
        return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

        vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
        return (timecounter == &tsc_timecounter);
}
#endif