Lines Matching +full:ref2 +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
81 data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset); in __cyc2ns_read()
82 data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul); in __cyc2ns_read()
83 data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift); in __cyc2ns_read()
114 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
115 * (64-bit result) can be used.
120 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
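(The comment these hits come from describes the classic mult/shift trick: rather than dividing by the TSC frequency on every sched_clock() call, a 32-bit multiplier and a shift are precomputed so that ns = cycles * mul >> shift. Below is a minimal, self-contained userspace sketch of that arithmetic; the fixed shift and the 3 GHz value are illustrative assumptions only. The kernel derives mul/shift with clocks_calc_mult_shift(), adds the per-CPU cyc2ns_offset seen at lines 81/172 to keep the clock continuous, and uses mul_u64_u32_shr() so large cycle counts do not overflow 64 bits.)

    #include <stdint.h>
    #include <stdio.h>

    /* ns = cycles * ((10^6 << SHIFT) / cpu_khz) >> SHIFT; the multiplier
     * must fit in 32 bits, which bounds how large SHIFT can be. */
    #define CYC2NS_SHIFT 10

    static uint64_t cyc2ns(uint64_t cycles, uint32_t mul)
    {
            /* plain 64-bit multiply; fine for short deltas only */
            return (cycles * mul) >> CYC2NS_SHIFT;
    }

    int main(void)
    {
            uint32_t cpu_khz = 3000000;   /* assumed 3 GHz TSC, illustration only */
            uint32_t mul = (uint32_t)(((uint64_t)1000000 << CYC2NS_SHIFT) / cpu_khz);

            /* 3,000,000 cycles at 3 GHz is 1 ms; prints ~999023 ns because the
             * small shift truncates the multiplier (341.33 -> 341).  A larger
             * shift shrinks that rounding error. */
            printf("%llu ns\n", (unsigned long long)cyc2ns(3000000, mul));
            return 0;
    }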
164 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit in __set_cyc2ns_scale()
165 * value) - refer to the perf_event_mmap_page documentation in perf_event.h. in __set_cyc2ns_scale()
172 data.cyc2ns_offset = ns_now - in __set_cyc2ns_scale()
177 write_seqcount_latch_begin(&c2n->seq); in __set_cyc2ns_scale()
178 c2n->data[0] = data; in __set_cyc2ns_scale()
179 write_seqcount_latch(&c2n->seq); in __set_cyc2ns_scale()
180 c2n->data[1] = data; in __set_cyc2ns_scale()
181 write_seqcount_latch_end(&c2n->seq); in __set_cyc2ns_scale()
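(Lines 177-181 are the write side of the seqcount latch: the sequence is bumped around each of the two data copies, so a reader such as lines 81-83 always uses data[seq & 1], which is never the copy currently being rewritten, and retries only if the sequence moved underneath it. The sketch below is a rough userspace illustration of that indexing scheme using C11 atomics; the memory barriers and per-CPU annotations provided by the kernel's seqcount_latch_t are deliberately elided, so treat it as an illustration, not a race-free implementation.)

    #include <stdatomic.h>
    #include <stdint.h>

    struct cyc2ns_data {
            uint32_t cyc2ns_mul;
            uint32_t cyc2ns_shift;
            uint64_t cyc2ns_offset;
    };

    static _Atomic unsigned int seq;     /* even: data[0] live, odd: data[1] live */
    static struct cyc2ns_data data[2];

    static void latch_write(struct cyc2ns_data fresh)
    {
            atomic_fetch_add(&seq, 1);   /* odd: readers now use data[1]      */
            data[0] = fresh;             /* so data[0] is safe to rewrite     */
            atomic_fetch_add(&seq, 1);   /* even: readers switch to data[0]   */
            data[1] = fresh;             /* and data[1] is safe to rewrite    */
    }

    static struct cyc2ns_data latch_read(void)
    {
            struct cyc2ns_data snap;
            unsigned int s;

            do {
                    s = atomic_load(&seq);
                    snap = data[s & 1];
            } while (s != atomic_load(&seq));   /* retry if a write raced */

            return snap;
    }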
205 seqcount_latch_init(&c2n->seq); in cyc2ns_init_boot_cpu()
218 struct cyc2ns_data *data = c2n->data; in cyc2ns_init_secondary_cpus()
222 seqcount_latch_init(&c2n->seq); in cyc2ns_init_secondary_cpus()
224 c2n->data[0] = data[0]; in cyc2ns_init_secondary_cpus()
225 c2n->data[1] = data[1]; in cyc2ns_init_secondary_cpus()
231 * Scheduler clock - returns current time in nanosec units.
252 return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); in native_sched_clock()
368 if ((t2 - t1) < thresh) in tsc_read_refs()
383 hpet2 -= hpet1; in calc_hpet_ref()
403 pm2 -= pm1; in calc_pmtimer_ref()
466 delta = t2 - tsc; in pit_calibrate_tsc()
488 delta = t2 - t1; in pit_calibrate_tsc()
496 * non-virtualized hardware.
500 * - the PIT is running at roughly 1.19MHz
502 * - each IO is going to take about 1us on real hardware,
505 * update - anything else implies an unacceptably slow CPU
508 * - with 256 PIT ticks to read the value, we have 214us to
512 * - We're doing 2 reads per loop (LSB, MSB), and we expect
517 * - if the PIT is stuck, and we see *many* more reads, we
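(Taken together, the expectations above pin down the timing budget of pit_expect_msb(); the numbers below are simply the figures from the comment worked through:

    256 PIT ticks / 1.193182 MHz         ~= 214.6 us per MSB value
    1 LSB read + 1 MSB read, ~1 us each  ~=   2   us per loop iteration
    214.6 us / 2 us                      ~= ~100  iterations before the MSB changes

so on sane hardware each MSB value should be seen on the order of a hundred times; a count far below that implies an unacceptably slow CPU or PIT, and far above it suggests the PIT is stuck.)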
546 *deltap = get_cycles() - prev_tsc; in pit_expect_msb()
578 * Counter 2, mode 0 (one-shot), binary count in quick_pit_calibrate()
582 * final output frequency as a decrement-by-one), in quick_pit_calibrate()
595 * to do that is to just read back the 16-bit counter in quick_pit_calibrate()
602 if (!pit_expect_msb(0xff-i, &delta, &d2)) in quick_pit_calibrate()
605 delta -= tsc; in quick_pit_calibrate()
628 if (!pit_verify_msb(0xfe - i)) in quick_pit_calibrate()
646 * kHz = ticks / time-in-seconds / 1000; in quick_pit_calibrate()
647 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 in quick_pit_calibrate()
648 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) in quick_pit_calibrate()
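(Plugging purely illustrative numbers into the formula above: with PIT_TICK_RATE = 1193182 Hz, I = 20 observed MSB steps and a measured TSC delta of t2 - t1 = 12,873,000 cycles,

    elapsed = I * 256 / PIT_TICK_RATE = 5120 / 1193182 s ~= 4.291 ms
    kHz     = 12873000 * 1193182 / (20 * 256 * 1000)     ~= 3,000,000

i.e. roughly a 3 GHz TSC. Using the last form, with the multiplication done before the division, avoids throwing away precision in the integer arithmetic.)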
657 * native_calibrate_tsc - determine TSC frequency
757 u64 tsc1, tsc2, delta, ref1, ref2; in pit_hpet_ptimer_calibrate_cpu() local
804 tsc2 = tsc_read_refs(&ref2, hpet); in pit_hpet_ptimer_calibrate_cpu()
811 if (ref1 == ref2) in pit_hpet_ptimer_calibrate_cpu()
818 tsc2 = (tsc2 - tsc1) * 1000000LL; in pit_hpet_ptimer_calibrate_cpu()
820 tsc2 = calc_hpet_ref(tsc2, ref1, ref2); in pit_hpet_ptimer_calibrate_cpu()
822 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2); in pit_hpet_ptimer_calibrate_cpu()
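(The pre-scaling at line 818 is what makes the helpers at lines 820/822 come out directly in kHz: the TSC delta is multiplied by 10^6 up front, and calc_hpet_ref() then divides by the elapsed time of the reference clock in nanoseconds, the HPET period register giving femtoseconds per tick:

    elapsed_ns = (ref2 - ref1) * hpet_period_fs / 10^6
    tsc_khz    = (tsc2 - tsc1) * 10^6 / elapsed_ns

Cycles per nanosecond times 10^6 is cycles per millisecond, which is exactly kHz; calc_pmtimer_ref() does the same using the ACPI PM timer's fixed 3.579545 MHz rate.)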
863 if (!hpet && !ref1 && !ref2) { in pit_hpet_ptimer_calibrate_cpu()
882 if (!hpet && !ref1 && !ref2) { in pit_hpet_ptimer_calibrate_cpu()
905 * native_calibrate_cpu_early - can calibrate the cpu early in boot
923 * native_calibrate_cpu - calibrate the cpu
947 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) in recalibrate_cpu_khz()
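(The comparison at line 947, repeated at line 1474, is a divide-free 10% check: abs(cpu_khz - tsc_khz) * 10 > tsc_khz is the same condition as abs(cpu_khz - tsc_khz) > tsc_khz / 10, i.e. the two calibration results disagree by more than 10%.)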
994 offset = cyc2ns_suspend - sched_clock(); in tsc_restore_sched_clock_state()
1031 ref_freq = freq->old; in time_cpufreq_notifier()
1036 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || in time_cpufreq_notifier()
1037 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { in time_cpufreq_notifier()
1039 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); in time_cpufreq_notifier()
1041 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); in time_cpufreq_notifier()
1042 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) in time_cpufreq_notifier()
1045 set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc()); in time_cpufreq_notifier()
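(cpufreq_scale(old, div, mult) evaluates old * mult / div, so the update at line 1041 is effectively

    tsc_khz = tsc_khz_ref * freq->new / ref_freq

with ref_freq and tsc_khz_ref captured from the first notification (line 1031). As a purely illustrative example, a CPU calibrated with tsc_khz_ref = 2,400,000 at ref_freq = 2,400,000 kHz that steps down to freq->new = 1,200,000 kHz ends up with tsc_khz = 1,200,000, and line 1045 then recomputes the cyc2ns scale so sched_clock() keeps converting cycles correctly at the new rate.)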
1085 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required, in detect_art()
1117 * structure to avoid a nasty time-warp. This can be observed in a
1128 * checking the result of read_tsc() - cycle_last for being negative.
1167 .name = "tsc-early",
1244 /* Geode_LX - the OLPC CPU has a very reliable TSC */ in check_system_tsc_reliable()
1254 * - TSC running at constant frequency in check_system_tsc_reliable()
1255 * - TSC which does not stop in C-States in check_system_tsc_reliable()
1256 * - the TSC_ADJUST register which allows to detect even minimal in check_system_tsc_reliable()
1258 * - not more than four packages in check_system_tsc_reliable()
1302 * tsc_refine_calibration_work - Further refine tsc freq calibration
1354 delta = tsc_stop - tsc_start; in tsc_refine_calibration_work()
1365 if (abs(tsc_khz - freq) > (tsc_khz >> 11)) { in tsc_refine_calibration_work()
1381 if (abs(tsc_khz - freq) > tsc_khz/100) in tsc_refine_calibration_work()
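(Both refinement thresholds are easier to read as percentages: tsc_khz >> 11 is tsc_khz / 2048, roughly a 0.05% (about 500 ppm) window used at line 1365 to compare the refined result against the earlier calibration, while tsc_khz / 100 at line 1381 is a coarser 1% sanity bound.)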
1462 /* We should not be here with non-native cpu calibration */ in determine_cpu_tsc_frequencies()
1468 * Trust non-zero tsc_khz as authoritative, in determine_cpu_tsc_frequencies()
1474 else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz) in determine_cpu_tsc_frequencies()
1515 /* Don't change UV TSC multi-chassis synchronization */ in tsc_early_init()