/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 */
#include <linux/clocksource.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

#include <asm/time.h>

/* Read CP0 Count on behalf of the clocksource core. */
static u64 c0_hpt_read(struct clocksource *cs)
{
	return read_c0_count();
}

static struct clocksource clocksource_mips = {
	.name		= "MIPS",
	.read		= c0_hpt_read,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY |
			  CLOCK_SOURCE_VERIFY_PERCPU,
};

static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
	return read_c0_count();
}

/* Read CP0 Count via RDHWR $2, the same path user mode and the VDSO use. */
static inline unsigned int rdhwr_count(void)
{
	unsigned int count;

	__asm__ __volatile__(
	"	.set	push\n"
	"	.set	mips32r2\n"
	"	rdhwr	%0, $2\n"
	"	.set	pop\n"
	: "=r" (count));

	return count;
}

static bool rdhwr_count_usable(void)
{
	unsigned int prev, curr, i;

	/*
	 * Older QEMUs have a broken implementation of RDHWR for the CP0 count
	 * which always returns a constant value. Try to identify this and
	 * don't use it in the VDSO if it is broken. This workaround can be
	 * removed once the fix has been in QEMU stable for a reasonable
	 * amount of time.
	 */
	for (i = 0, prev = rdhwr_count(); i < 100; i++) {
		curr = rdhwr_count();

		if (curr != prev)
			return true;

		prev = curr;
	}

	pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
	return false;
}

/*
 * The counter can only drive sched_clock() if its rate is fixed (no
 * cpufreq scaling) and, on SMP, if the kernel can cope with the per-CPU
 * counters being unsynchronized.
 */
static inline __init bool count_can_be_sched_clock(void)
{
	if (IS_ENABLED(CONFIG_CPU_FREQ))
		return false;

	if (num_possible_cpus() > 1 &&
	    !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK))
		return false;

	return true;
}

#ifdef CONFIG_CPU_FREQ

static bool __read_mostly r4k_clock_unstable;

static void r4k_clocksource_unstable(char *reason)
{
	if (r4k_clock_unstable)
		return;

	r4k_clock_unstable = true;

	pr_info("R4K timer is unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_mips);
}

static int r4k_cpufreq_callback(struct notifier_block *nb,
				unsigned long val, void *data)
{
	if (val == CPUFREQ_POSTCHANGE)
		r4k_clocksource_unstable("CPU frequency change");

	return 0;
}

static struct notifier_block r4k_cpufreq_notifier = {
	.notifier_call	= r4k_cpufreq_callback,
};

static int __init r4k_register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&r4k_cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	/* Calculate a somewhat reasonable rating value */
	clocksource_mips.rating = 200;
	clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99);
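
	/*
	 * For example, a 50 MHz counter rates 200 + 5 = 205, while anything
	 * at or above 990 MHz saturates the clamp at 99 and rates 299.
	 */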

	/*
	 * R2 onwards makes the count accessible to user mode so it can be
	 * used by the VDSO (HWREna is configured by configure_hwrena()).
	 */
	if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
		clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

	if (count_can_be_sched_clock())
		sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);

	return 0;
}
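
/*
 * Minimal usage sketch: a platform opts in to this clocksource by setting
 * mips_hpt_frequency from its plat_time_init() before the timer code runs;
 * init_r4k_clocksource() above then refuses registration only when the core
 * lacks a counter or the frequency was left at zero. The board helper below
 * is hypothetical, and the divide-by-two reflects the common case of CP0
 * Count advancing every other CPU cycle.
 */
#if 0	/* example only, not compiled */
void __init plat_time_init(void)
{
	/* Hypothetical helper returning the board's CPU clock in Hz. */
	unsigned int cpu_hz = my_board_get_cpu_hz();

	/* CP0 Count typically ticks at half the pipeline clock. */
	mips_hpt_frequency = cpu_hz / 2;
}
#endif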