/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004 Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/profile.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>

#include <irq.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ. This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY		TICK_SIZE
#define USECS_PER_JIFFY_FRAC	((unsigned long)(u32)((1000000ULL << 32) / HZ))

#define TICK_SIZE	(tick_nsec / 1000)

/*
 * forward reference
 */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

int __weak rtc_mips_set_time(unsigned long sec)
{
	return 0;
}
EXPORT_SYMBOL(rtc_mips_set_time);

int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
	return rtc_mips_set_time(nowtime);
}

int update_persistent_clock(struct timespec now)
{
	return rtc_mips_set_mmss(now.tv_sec);
}

/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;

/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static cycle_t null_hpt_read(void)
{
	return 0;
}

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
	write_c0_compare(read_c0_compare());
}

/*
 * High precision timer functions for an R4k-compatible timer.
 */
static cycle_t c0_hpt_read(void)
{
	return read_c0_count();
}

int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
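/*
 * Hypothetical usage sketch (the board names below are made up for
 * illustration): a platform with an external timer of unknown frequency
 * can point mips_timer_state at a routine that samples the timer's
 * output line and leave mips_hpt_frequency at 0, so that time_init()
 * below falls back to calibrate_hpt():
 *
 *	static int my_board_timer_state(void)
 *	{
 *		return my_board_read_timer_pin();  // nonzero while asserted
 *	}
 *
 *	void __init plat_time_init(void)
 *	{
 *		mips_timer_state = my_board_timer_state;
 *	}
 */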
/*
 * local_timer_interrupt() does profiling and process accounting
 * on a per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 *
 * In SMP mode, it might be invoked by a per-CPU timer interrupt, or
 * by a broadcast inter-processor interrupt which itself is triggered
 * by the global timer interrupt.
 */
void local_timer_interrupt(int irq, void *dev_id)
{
	profile_tick(CPU_PROFILING);
	update_process_times(user_mode(get_irq_regs()));
}

int null_perf_irq(void)
{
	return 0;
}

EXPORT_SYMBOL(null_perf_irq);

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(perf_irq);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

/*
 * time_init() - it does the following things:
 *
 * 1) plat_time_init() -
 *	a) (optional) set up RTC routines,
 *	b) (optional) calibrate and set the mips_hpt_frequency
 *	   (only needed if you intend to use the CPU counter as the timer
 *	   interrupt source)
 * 2) calculate a couple of cached variables for later use
 * 3) plat_timer_setup() -
 *	a) (optional) override any choices made above by time_init(),
 *	b) machine specific code should set up the timer irqaction,
 *	c) enable the timer interrupt
 */

unsigned int mips_hpt_frequency;

static unsigned int __init calibrate_hpt(void)
{
	cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;

	const int loops = HZ / 10;
	int log_2_loops = 0;
	int i;

	/*
	 * We want to calibrate for 0.1s, but to avoid a 64-bit
	 * division we round the number of loops up to the nearest
	 * power of 2.
	 */
	while (loops > 1 << log_2_loops)
		log_2_loops++;
	i = 1 << log_2_loops;

	/*
	 * Wait for a rising edge of the timer interrupt.
	 */
	while (mips_timer_state());
	while (!mips_timer_state());

	/*
	 * Now see how many high precision timer ticks happen
	 * during the calculated number of periods between timer
	 * interrupts.
	 */
	hpt_start = clocksource_mips.read();
	do {
		while (mips_timer_state());
		while (!mips_timer_state());
	} while (--i);
	hpt_end = clocksource_mips.read();

	hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
	hz = HZ;
	frequency = hpt_count * hz;

	return frequency >> log_2_loops;
}
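/*
 * Worked example (assuming HZ == 100 and a hypothetical 100 MHz counter):
 * loops == 10, which rounds up to 16 == 1 << 4, so 16 timer periods
 * (0.16 s) are measured.  The counter advances by about 16,000,000 over
 * that window, and (16000000 * 100) >> 4 == 100,000,000 Hz recovers the
 * counter frequency without a 64-bit division.
 */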
struct clocksource clocksource_mips = {
	.name		= "MIPS",
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

#ifdef CONFIG_MIPS_MT_SMTC
	{
	unsigned long flags, vpflags;
	local_irq_save(flags);
	vpflags = dvpe();
#endif
	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	/* If the new compare value has already been passed, report -ETIME. */
	res = ((long)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
#ifdef CONFIG_MIPS_MT_SMTC
	evpe(vpflags);
	local_irq_restore(flags);
	}
#endif
	return res;
}

static void mips_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static int cp0_timer_irq_installed;

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {	/* Cause.TI set?  */
		c0_timer_ack();
#ifdef CONFIG_MIPS_MT_SMTC
		if (cpu_data[cpu].vpe_id)
			goto out;
		cpu = 0;
#endif
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler = timer_interrupt,
#ifdef CONFIG_MIPS_MT_SMTC
	.flags = IRQF_DISABLED,
#else
	.flags = IRQF_DISABLED | IRQF_PERCPU,
#endif
	.name = "timer",
};

static void __init init_mips_clocksource(void)
{
	u64 temp;
	u32 shift;

	if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
		return;

	/* Calculate a somewhat reasonable rating value */
	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
	/* Find a shift value */
	for (shift = 32; shift > 0; shift--) {
		temp = (u64) NSEC_PER_SEC << shift;
		do_div(temp, mips_hpt_frequency);
		if ((temp >> 32) == 0)
			break;
	}
	clocksource_mips.shift = shift;
	clocksource_mips.mult = (u32)temp;

	clocksource_register(&clocksource_mips);
}
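/*
 * Worked example (assuming mips_hpt_frequency == 100 MHz): the rating
 * comes out as 200 + 10 == 210, the shift loop settles on shift == 28
 * (the largest value for which (NSEC_PER_SEC << shift) / 10^8 still fits
 * in 32 bits) and mult == (10^9 << 28) / 10^8 == 2684354560.  The
 * clocksource core then converts cycles to nanoseconds as
 * (cycles * mult) >> shift, i.e. cycles * 10 ns for this hypothetical
 * 100 MHz counter.
 */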
void __init __weak plat_time_init(void)
{
}

void __init __weak plat_timer_setup(struct irqaction *irq)
{
}

#ifdef CONFIG_MIPS_MT_SMTC
DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

static void smtc_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt)
{
}

int dummycnt[NR_CPUS];

static void mips_broadcast(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
}

static void setup_smtc_dummy_clockevent_device(void)
{
	//uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;

	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);

	cd->name		= "SMTC";
	cd->features		= CLOCK_EVT_FEAT_DUMMY;

	/* Calculate the min / max delta */
	cd->mult		= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 0; //32;
	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);

	cd->rating		= 200;
	cd->irq			= 17; //-1;
//	if (cpu)
//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
//	else
		cd->cpumask	= cpumask_of_cpu(cpu);

	cd->set_mode		= smtc_set_mode;

	cd->broadcast		= mips_broadcast;

	clockevents_register_device(cd);
}
#endif

static void mips_event_handler(struct clock_event_device *dev)
{
}

void __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq = MIPS_CPU_IRQ_BASE + 7;

	if (!cpu_has_counter)
		return;

#ifdef CONFIG_MIPS_MT_SMTC
	setup_smtc_dummy_clockevent_device();

	/*
	 * On SMTC we only register VPE0's compare interrupt as the
	 * clockevent device.
	 */
	if (cpu)
		return;
#endif

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult		= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x30, cd);

	cd->rating		= 300;
	cd->irq			= irq;
#ifdef CONFIG_MIPS_MT_SMTC
	cd->cpumask		= CPU_MASK_ALL;
#else
	cd->cpumask		= cpumask_of_cpu(cpu);
#endif
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (!cp0_timer_irq_installed) {
#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
		setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
#else
		setup_irq(irq, &timer_irqaction);
#endif /* CONFIG_MIPS_MT_SMTC */
		cp0_timer_irq_installed = 1;
	}
}
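/*
 * Worked example (again assuming a hypothetical 100 MHz counter):
 * div_sc(10^8, 10^9, 32) gives mult == (10^8 << 32) / 10^9 == 429496729,
 * so the clockevents core converts a requested delta in nanoseconds to
 * counter cycles as (ns * mult) >> 32 == ns / 10.  clockevent_delta2ns()
 * inverts this: max_delta_ns ~= 0x7fffffff cycles * 10 ns ~= 21.4 s and
 * min_delta_ns == 0x30 cycles * 10 ns == 480 ns.
 */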
void __init time_init(void)
{
	plat_time_init();

	/* Choose appropriate high precision timer routines.  */
	if (!cpu_has_counter && !clocksource_mips.read)
		/* No high precision timer -- sorry.  */
		clocksource_mips.read = null_hpt_read;
	else if (!mips_hpt_frequency && !mips_timer_state) {
		/* A high precision timer of unknown frequency.  */
		if (!clocksource_mips.read)
			/* No external high precision timer -- use R4k.  */
			clocksource_mips.read = c0_hpt_read;
	} else {
		/* We know counter frequency.  Or we can get it.  */
		if (!clocksource_mips.read) {
			/* No external high precision timer -- use R4k.  */
			clocksource_mips.read = c0_hpt_read;

			if (!mips_timer_state) {
				/* No external timer interrupt -- use R4k.  */
				mips_timer_ack = c0_timer_ack;
				/* Calculate cache parameters.  */
				cycles_per_jiffy =
					(mips_hpt_frequency + HZ / 2) / HZ;
			}
		}
		if (!mips_hpt_frequency)
			mips_hpt_frequency = calibrate_hpt();

		/* Report the high precision timer rate for a reference.  */
		printk("Using %u.%03u MHz high precision timer.\n",
		       ((mips_hpt_frequency + 500) / 1000) / 1000,
		       ((mips_hpt_frequency + 500) / 1000) % 1000);

#ifdef CONFIG_IRQ_CPU
		setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
#endif
	}

	if (!mips_timer_ack)
		/* No timer interrupt ack (e.g. i8254).  */
		mips_timer_ack = null_timer_ack;

	/*
	 * Call board specific timer interrupt setup.
	 *
	 * This pointer must be set up in the machine setup routine.
	 *
	 * Even if a machine chooses to use a low-level timer interrupt,
	 * it still needs to set up the timer_irqaction.
	 * In that case, it might be better to set timer_irqaction.handler
	 * to a NULL function, so that we are sure the high-level code
	 * is not invoked accidentally.
	 */
	plat_timer_setup(&timer_irqaction);

	init_mips_clocksource();
	mips_clockevent_init();
}