/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the PC-specific time handling details:
 * reading the RTC at bootup, etc..
 * 1994-07-02    Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1995-03-26    Markus Kuhn
 *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
 *      precision CMOS clock update
 * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09    Adrian Sun
 *      use interval timer if CONFIG_RTC=y
 * 1997-10-29    John Bowman (bowman@math.ualberta.ca)
 *      fixed tick loss calculation in timer_interrupt
 *      (round system clock to nearest tick instead of truncating)
 *      fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16	Thorsten Kranzkowski (dl8bcu@gmx.net)
 *	fixed algorithm in do_gettimeofday() for calculating the precise time
 *	from processor cycle counter (now taking lost_ticks into account)
 * 2000-08-13	Jan-Benedict Glaw <jbglaw@lug-owl.de>
 * 	Fixed time_init to be aware of epochs != 1900. This prevents
 * 	booting up in 2048 for me;) Code is stolen from rtc.c.
 * 2003-06-03	R. Scott Bailey <scott.bailey@eds.com>
 *	Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/8253pit.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>

#include "proto.h"
#include "irq_impl.h"

static int set_rtc_mmss(unsigned long);

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

#define TICK_SIZE (tick_nsec / 1000)

/*
 * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
 * for large CPU clock rates.
 */
#define FIX_SHIFT	48
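/*
 * Illustrative numbers only (assuming HZ == 1024 and a 500 MHz cycle
 * counter, neither of which is guaranteed): scaled_ticks_per_cycle
 * would be (1024 << 48) / 500000000 ~= 576460752, i.e. roughly two
 * millionths of a tick per cycle in 2^-48 fixed point.  Multiplying
 * by the ~488281 cycles in one tick and shifting right by FIX_SHIFT
 * recovers almost exactly one tick.
 */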

/* lump static variables together for more efficient access: */
static struct {
	/* cycle counter last time it got invoked */
	__u32 last_time;
	/* ticks/cycle * 2^48 */
	unsigned long scaled_ticks_per_cycle;
	/* last time the CMOS clock got updated */
	time_t last_rtc_update;
	/* partial unused tick */
	unsigned long partial_tick;
} state;

unsigned long est_cycle_freq;

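/*
 * Read the low 32 bits of the processor cycle counter.  As the comment
 * above do_gettimeofday() notes, only these 32 bits count cycles, so
 * the value wraps every 2^32 cycles (on the order of 8-9 seconds on a
 * 500 MHz part).
 */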
static inline __u32 rpcc(void)
{
    __u32 result;
    asm volatile ("rpcc %0" : "=r"(result));
    return result;
}

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Copied from ARM code for expediency... ;-}
 */
unsigned long long sched_clock(void)
{
        return (unsigned long long)jiffies * (1000000000 / HZ);
}


/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev)
{
	unsigned long delta;
	__u32 now;
	long nticks;

#ifndef CONFIG_SMP
	/* Not SMP, do kernel PC profiling here.  */
	profile_tick(CPU_PROFILING);
#endif

	write_seqlock(&xtime_lock);

	/*
	 * Calculate how many ticks have passed since the last update,
	 * including any previous partial leftover.  Save any resulting
	 * fraction for the next pass.
	 */
	now = rpcc();
	delta = now - state.last_time;
	state.last_time = now;
	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
	nticks = delta >> FIX_SHIFT;

	while (nticks > 0) {
		do_timer(1);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif
		nticks--;
	}

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if (ntp_synced()
	    && xtime.tv_sec > state.last_rtc_update + 660
	    && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2
	    && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		int tmp = set_rtc_mmss(xtime.tv_sec);
		state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
	}

	write_sequnlock(&xtime_lock);
	return IRQ_HANDLED;
}

void
common_init_rtc(void)
{
	unsigned char x;

	/* Reset periodic interrupt frequency.  */
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
        /* Test includes known working values on various platforms
           where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
		CMOS_WRITE(0x26, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts.  */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	(void) CMOS_READ(RTC_INTR_FLAGS);

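	/*
	 * For reference, decoding the standard 8254 command bytes below
	 * (assuming the usual ~1.19 MHz PIT input clock): 0x36 selects
	 * counter 0, LSB then MSB, mode 3 (square wave), binary, and a
	 * count of 0x0000 means 65536, i.e. roughly 18.2 Hz.  0xb6 is
	 * the same setup for counter 2 (the speaker) with a count of
	 * 0x1331 (4913), giving a tone of roughly 243 Hz.
	 */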
	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
	static struct bounds {
		unsigned int min, max;
	} cpu_hz[] __initdata = {
		[EV3_CPU]    = {   50000000,  200000000 },	/* guess */
		[EV4_CPU]    = {  100000000,  300000000 },
		[LCA4_CPU]   = {  100000000,  300000000 },	/* guess */
		[EV45_CPU]   = {  200000000,  300000000 },
		[EV5_CPU]    = {  250000000,  433000000 },
		[EV56_CPU]   = {  333000000,  667000000 },
		[PCA56_CPU]  = {  400000000,  600000000 },	/* guess */
		[PCA57_CPU]  = {  500000000,  600000000 },	/* guess */
		[EV6_CPU]    = {  466000000,  600000000 },
		[EV67_CPU]   = {  600000000,  750000000 },
		[EV68AL_CPU] = {  750000000,  940000000 },
		[EV68CB_CPU] = { 1000000000, 1333333333 },
		/* None of the following are shipping as of 2001-11-01.  */
		[EV68CX_CPU] = { 1000000000, 1700000000 },	/* guess */
		[EV69_CPU]   = { 1000000000, 1700000000 },	/* guess */
		[EV7_CPU]    = {  800000000, 1400000000 },	/* guess */
		[EV79_CPU]   = { 1000000000, 2000000000 },	/* guess */
	};

	/* Allow for some drift in the crystal.  10MHz is more than enough.  */
	const unsigned int deviation = 10000000;

	struct percpu_struct *cpu;
	unsigned int index;

	cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
	index = cpu->type & 0xffffffff;

	/* If index out of bounds, no way to validate.  */
	if (index >= ARRAY_SIZE(cpu_hz))
		return cc;

	/* If index contains no data, no way to validate.  */
	if (cpu_hz[index].max == 0)
		return cc;

	if (cc < cpu_hz[index].min - deviation
	    || cc > cpu_hz[index].max + deviation)
		return 0;

	return cc;
}


/*
 * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000

static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load CALIBRATE_LATCH count, (LSB and MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);		/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST.  */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

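	/*
	 * Rough sanity check, assuming the usual ~1.19 MHz PIT input
	 * clock: counting down CALIBRATE_LATCH + 1 = 65536 PIT ticks
	 * takes about 54.9 ms, so a 500 MHz part should see cc in the
	 * neighborhood of 27.5 million cycles; the expression below
	 * scales that back up to cycles per second.
	 */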
	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}
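
/* Two consecutive calls to rpcc_after_update_in_progress() bracket
   exactly one RTC second, so the difference between the two cycle
   counter readings approximates the CPU frequency in Hz.  That is
   calibration attempt #2 in time_init() below.  */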

void __init
time_init(void)
{
	unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
	unsigned long cycle_freq, tolerance;
	long diff;

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
	if (!est_cycle_freq) {
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
	   to settle, as the Update-In-Progress bit going low isn't good
	   enough on some hardware.  2ms is our guess; we haven't found
	   bogomips yet, but this is close on a 500 MHz box.  */
	__delay(1000000);

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
		BCD_TO_BIN(sec);
		BCD_TO_BIN(min);
		BCD_TO_BIN(hour);
		BCD_TO_BIN(day);
		BCD_TO_BIN(mon);
		BCD_TO_BIN(year);
	}

	/* PC-like is standard; used for year >= 70 */
	epoch = 1900;
	if (year < 20)
		epoch = 2000;
	else if (year >= 20 && year < 48)
		/* NT epoch */
		epoch = 1980;
	else if (year >= 48 && year < 70)
		/* Digital UNIX epoch */
		epoch = 1952;

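	/*
	 * Illustrative readings: a CMOS year of 05 is taken as 2000 + 5,
	 * 25 as 1980 + 25 and 53 as 1952 + 53, so all three console
	 * conventions land in the same era, while 95 keeps the PC-style
	 * 1900 epoch and becomes 1995.
	 */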
	printk(KERN_INFO "Using epoch = %d\n", epoch);

	if ((year += epoch) < 1970)
		year += 100;

	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = 0;

        wall_to_monotonic.tv_sec -= xtime.tv_sec;
        wall_to_monotonic.tv_nsec = 0;

	if (HZ > (1<<16)) {
		extern void __you_loose (void);
		__you_loose();
	}

	state.last_time = cc1;
	state.scaled_ticks_per_cycle
		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
	state.last_rtc_update = 0;
	state.partial_tick = 0L;

	/* Startup the timer source. */
	alpha_mv.init_rtc();
}

/*
 * Use the cycle counter to estimate the displacement from the last
 * timer tick.  Unfortunately the Alpha designers made only the low
 * 32 bits of the cycle counter active, so it wraps after about 8.6
 * seconds on a 500 MHz part.  That means we can't do the "find
 * absolute time in terms of cycles" thing that the other ports do.
 */
void
do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, seq;
	unsigned long delta_cycles, delta_usec, partial_tick;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta_cycles = rpcc() - state.last_time;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / 1000);
		partial_tick = state.partial_tick;

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

#ifdef CONFIG_SMP
	/* Until and unless we figure out how to get cpu cycle counters
	   in sync and keep them there, we can't use the rpcc tricks.  */
	delta_usec = 0;
#else
	/*
	 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
	 *	= cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
	 *	= cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
	 *
	 * which, given a 600MHz cycle and a 1024Hz tick, has a
	 * dynamic range of about 1.7e17, which is less than the
	 * 1.8e19 in an unsigned long, so we are safe from overflow.
	 *
	 * Round, but with .5 up always, since .5 to even is harder
	 * with no clear gain.
	 */
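	/*
	 * Quick numeric check (illustrative values only: 500 MHz clock,
	 * HZ == 1024): one full tick is about 488281 cycles, and feeding
	 * that through the two statements below yields 977 usec, i.e.
	 * essentially the 976.56 usec length of a 1/1024 s tick.
	 */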

	delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
		      + partial_tick) * 15625;
	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
#endif

	usec += delta_usec;
	if (usec >= 1000000) {
		sec += 1;
		usec -= 1000000;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long delta_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* The offset that is added into time in do_gettimeofday above
	   must be subtracted out here to keep a coherent view of the
	   time.  Without this, a full-tick error is possible.  */

#ifdef CONFIG_SMP
	delta_nsec = 0;
#else
	delta_nsec = rpcc() - state.last_time;
	delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
		      + state.partial_tick) * 15625;
	delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
	delta_nsec *= 1000;
#endif

	nsec -= delta_nsec;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);


/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later. Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes. Usually you won't notice until after reboot!
 */


static int
set_rtc_mmss(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char save_control, save_freq_select;

	/* IRQs are locally disabled here */
	spin_lock(&rtc_lock);
	/* Tell the clock it's being set */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

	/* Stop and reset prescaler */
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
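	/*
	 * Worked example with hypothetical values: if the RTC reads
	 * minute 10 while nowtime falls on minute 45 of the hour, the
	 * difference is 35, the half-hour test below fires, and
	 * real_minutes becomes 75 % 60 = 15, bringing the two within
	 * the 30 minute window.
	 */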
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
		/* correct for half hour time zone */
		real_minutes += 30;
	}
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			BIN_TO_BCD(real_seconds);
			BIN_TO_BCD(real_minutes);
		}
		CMOS_WRITE(real_seconds,RTC_SECONDS);
		CMOS_WRITE(real_minutes,RTC_MINUTES);
	} else {
		printk(KERN_WARNING
		       "set_rtc_mmss: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	/* The following flags have to be released exactly in this order,
	 * otherwise the DS12887 (popular MC146818A clone with integrated
	 * battery and quartz) will not reset the oscillator and will not
	 * update precisely 500 ms later. You won't find this mentioned in
	 * the Dallas Semiconductor data sheets, but who believes data
	 * sheets anyway ...                           -- Markus Kuhn
	 */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	spin_unlock(&rtc_lock);

	return retval;
}
