/*
 * linux/arch/parisc/kernel/time.c
 *
 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
 * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
 * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02	Alan Modra
 *		fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

/* Interval Timer cycles per scheduler tick (1/HZ s); computed in time_init(). */
static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

#ifndef CONFIG_64BIT
/*
 * The processor-internal cycle counter (Control Register 16) is used as time
 * source for the sched_clock() function.  This register is 64bit wide on a
 * 64-bit kernel and 32bit on a 32-bit kernel.  Since sched_clock() always
 * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
 * with a per-cpu variable which we increase every time the counter
 * wraps-around (which happens every ~4 seconds).
 */
static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
#endif

/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.
 * The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now, now2;
	unsigned long next_tick;
	unsigned long cycles_elapsed, ticks_elapsed = 1;
	unsigned long cycles_remainder;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Get current cycle counter (Control Register 16). */
	now = mfctl(16);

	/* How far past the intended tick time did this interrupt arrive?
	 * Unsigned subtraction gives the right answer even across a wrap. */
	cycles_elapsed = now - next_tick;

	if ((cycles_elapsed >> 6) < cpt) {
		/* use "cheap" math (add/subtract) instead
		 * of the more expensive div/mul method
		 * (taken when fewer than 64 ticks were missed)
		 */
		cycles_remainder = cycles_elapsed;
		while (cycles_remainder > cpt) {
			cycles_remainder -= cpt;
			ticks_elapsed++;
		}
	} else {
		/* TODO: Reduce this to one fdiv op */
		cycles_remainder = cycles_elapsed % cpt;
		ticks_elapsed += cycles_elapsed / cpt;
	}

	/* convert from "division remainder" to "remainder of clock tick" */
	cycles_remainder = cpt - cycles_remainder;

	/* Determine when (in CR16 cycles) next IT interrupt will fire.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 */
	next_tick = now + cycles_remainder;

	cpuinfo->it_value = next_tick;

	/* Program the IT when to deliver the next interrupt.
	 * Only bottom 32-bits of next_tick are writable in CR16!
	 */
	mtctl(next_tick, 16);

#if !defined(CONFIG_64BIT)
	/* check for overflow on a 32bit kernel (every ~4 seconds).
	 * This carries the emulated upper 32 bits used by sched_clock(). */
	if (unlikely(next_tick < now))
		this_cpu_inc(cr16_high_32_bits);
#endif

	/* Skip one clocktick on purpose if we missed next_tick.
	 * The new CR16 must be "later" than current CR16 otherwise
	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
	 * later on a 1GHz processor.  We'll account for the missed
	 * tick on the next timer interrupt.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * if one or the other wrapped.  If "now" is "bigger" we'll end up
	 * with a very large unsigned number.
	 */
	now2 = mfctl(16);
	if (next_tick - now2 > cpt)
		mtctl(next_tick+cpt, 16);

#if 1
/*
 * GGG: DEBUG code for how many cycles programming CR16 used.
 */
	if (unlikely(now2 - now > 0x3000)) 	/* 12K cycles */
		printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
			" cyc %lX rem %lX "
			" next/now %lX/%lX\n",
			cpu, now2 - now, cycles_elapsed, cycles_remainder,
			next_tick, now );
#endif

	/* Can we differentiate between "early CR16" (aka Scenario 1) and
	 * "long delay" (aka Scenario 3)?  I don't think so.
	 *
	 * Timer_interrupt will be delivered at least a few hundred cycles
	 * after the IT fires.  But it's arbitrary how much time passes
	 * before we call it "late".  I've picked one second.
	 *
	 * It's important NO printk's are between reading CR16 and
	 * setting up the next value.  May introduce huge variance.
	 */
	if (unlikely(ticks_elapsed > HZ)) {
		/* Scenario 3: very long delay?  bad in any case */
		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
			" cycles %lX rem %lX "
			" next/now %lX/%lX\n",
			cpu,
			cycles_elapsed, cycles_remainder,
			next_tick, now );
	}

	/* Done mucking with unreliable delivery of interrupts.
	 * Go do system house keeping.
	 */

	/* Local profiling/process-time accounting; prof_multiplier throttles
	 * how often update_process_times() runs relative to timer ticks. */
	if (!--cpuinfo->prof_counter) {
		cpuinfo->prof_counter = cpuinfo->prof_multiplier;
		update_process_times(user_mode(get_irq_regs()));
	}

	/* Only the boot CPU advances global xtime, crediting every tick
	 * (including missed ones) computed above. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	return IRQ_HANDLED;
}


/*
 * Return the PC to attribute a profiling sample to.  A nullified
 * instruction (PSW_N set in gr[0]) means the sample belongs to the
 * preceding instruction; on SMP, samples inside lock functions are
 * credited to the caller via gr[2] (the return pointer).
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;	/* back up over the nullified instruction */

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);


/* clock source code */

/* clocksource read callback: return the raw CR16 cycle count. */
static cycle_t read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Demote the cr16 clocksource to rating 0 once more than one CPU is
 * online, so the core picks a safe clocksource instead.
 * Returns 1 if the rating was changed, 0 otherwise.
 */
int update_cr16_clocksource(void)
{
	/* since the cr16 cycle counters are not synchronized across CPUs,
	   we'll check if we should switch to a safe clocksource: */
	if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
		clocksource_change_rating(&clocksource_cr16, 0);
		return 1;
	}

	return 0;
}

/*
 * Arm this CPU's Interval Timer for its first tick (one clocktick from
 * now) and record the expected expiry in per-cpu data for
 * timer_interrupt().
 */
void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
	/* With multiple 64bit CPUs online, the cr16's are not synchronized. */
	if (cpu != 0)
		clear_sched_clock_stable();
#endif

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}

/* Register the generic RTC platform device at device-initcall time. */
static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);

/*
 * Read the wall-clock time from PDC firmware's time-of-day clock.
 * On failure, log an error and report the epoch (0/0).
 */
void read_persistent_clock(struct timespec *ts)
{
	static struct pdc_tod tod_data;
	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}


/*
 * sched_clock() framework
 */

/* cycles -> ns conversion factors, computed in time_init()
 * via clocks_calc_mult_shift(). */
static u32 cyc2ns_mul    __read_mostly;
static u32 cyc2ns_shift  __read_mostly;

/*
 * Scheduler clock: CR16 cycle count scaled to nanoseconds.  On 32-bit
 * kernels the upper 32 bits come from the per-cpu wrap counter
 * maintained by timer_interrupt().
 */
u64 sched_clock(void)
{
	u64 now;

	/* Get current cycle counter (Control Register 16). */
#ifdef CONFIG_64BIT
	now = mfctl(16);
#else
	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
#endif

	/* return the value in ns (cycles_2_ns) */
	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
}


/*
 * timer interrupt and sched_clock() initialization
 */

void __init time_init(void)
{
	unsigned long current_cr16_khz;

	/* PAGE0->mem_10msec is the number of CR16 cycles per 10 ms,
	 * as reported by firmware. */
	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
	clocktick = (100 * PAGE0->mem_10msec) / HZ;

	/* calculate mult/shift values for cr16 */
	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
				NSEC_PER_MSEC, 0);

#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
	/* At bootup only one 64bit CPU is online and cr16 is "stable" */
	set_sched_clock_stable();
#endif

	start_cpu_itimer();	/* get CPU 0 started */

	/* register at clocksource framework */
	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}