/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time.
 * - for astronomical applications: add a new function to get
 * unambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

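/*
 * Largest value that can safely be written to the decrementer: the
 * register is 32 bits wide and treated as signed, and a negative value
 * makes some CPUs take another decrementer exception right away (see
 * the set_dec(DECREMENTER_MAX) calls in timer_interrupt() below).
 */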
#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
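
/*
 * An "xsec" is 1/XSEC_PER_SEC (i.e. 2^-20) of a second, so SCALE_XSEC()
 * rescales an xsec count onto a 0..max range.  Illustrative example:
 * half a second is 524288 xsec, and SCALE_XSEC(524288, 1000) = 500.
 */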

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
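
/*
 * Illustration (hypothetical 512 MHz timebase): each factor above is
 * (numerator * 2^64) / tb_ticks_per_sec, i.e. numerator/tb_ticks_per_sec
 * as a 0.64 binary fraction.  __cputime_usec_factor then becomes
 * 2^64 * 1000000 / 512000000, and taking the high 64 bits of
 * (ticks * factor), as mulhdu() does, divides by the 512 ticks that
 * make up one microsecond.
 */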

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == vpa->dtl_idx)
		return 0;
	while (i < vpa->dtl_idx) {
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		dtb = dtl->timebase;
		tb_delta = dtl->enqueue_to_dispatch_time +
			dtl->ready_to_enqueue_time;
		barrier();
		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
			/* buffer has overflowed */
			i = vpa->dtl_idx - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
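
/*
 * Illustration of the overflow case above with a hypothetical ring of
 * N_DISPATCH_LOG = 4 entries: if we last read up to dtl_ridx = 2 but
 * the hypervisor has pushed dtl_idx on to 9, the oldest surviving
 * entry is 9 - 4 = 5, so the scan resumes there and entries 2..4 are
 * skipped as lost.
 */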

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;

	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, nowscaled, delta, deltascaled;
	unsigned long flags;
	u64 stolen, udelta, sys_scaled, user_scaled;

	local_irq_save(flags);
	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval.  The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
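	/*
	 * Worked example with made-up numbers: delta = 600 and udelta = 200
	 * timebase ticks with deltascaled = 1200 SPURR ticks gives
	 * sys_scaled = 1200 * 600 / 800 = 900 and user_scaled = 300,
	 * preserving the 3:1 system:user split seen on the timebase.
	 */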
	sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - sys_scaled;
		} else {
			sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
		account_system_time(tsk, 0, delta, sys_scaled);
		if (stolen)
			account_steal_time(stolen);
	} else {
		account_idle_time(delta + stolen);
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 * Assumes that account_system_vtime() has been called recently
 * (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
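		/* unsigned subtraction is wraparound-safe: get_tbl() - start
		 * is the elapsed tick count even if the 32-bit timebase-low
		 * value wraps during the wait */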
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
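/*
 * On 64-bit, GPR13 (r13) always holds the PACA pointer, so the
 * irq_work_pending byte can be addressed directly as an offset from
 * register 13 with single lbz/stb instructions, with no per-cpu
 * accessor needed.
 */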
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

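/*
 * Request a timer interrupt as soon as possible: programming the
 * decrementer with 1 makes it underflow almost immediately, and
 * timer_interrupt() then sees test_irq_work_pending() and calls
 * irq_work_run().
 */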
void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
	struct clock_event_device *evt = &__get_cpu_var(decrementers);
	u64 now;

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline; just ignore these.
	 */
	if (!cpu_online(smp_processor_id()))
		return;

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value.
	 */
	may_hard_irq_enable();

	trace_timer_interrupt_entry(regs);

	__get_cpu_var(irq_stat).timer_irqs++;

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
	} else {
		now = *next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);

	trace_timer_interrupt_exit(regs);
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
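
/*
 * Example usage, as in generic_calibrate_decr() below:
 *
 *	get_freq("timebase-frequency", 1, &ppc_tb_freq);
 *
 * reads the one-cell "timebase-frequency" property from the first
 * device tree node of type "cpu" and returns 1 if it was found.
 */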

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case the real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
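	/* 18446744073 = 2^64 / 1e9 rounded down, so the line above computes
	 * tv_nsec * 2^32 / 1e9, the fraction of a second in 0.32 form */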

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = clock->cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	smp_mb();
	++vdso_data->tb_update_count;
}
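
/*
 * For reference, the reader side of the tb_update_count protocol
 * lives in the VDSO (e.g. arch/powerpc/kernel/vdso64/gettimeofday.S)
 * and amounts to the following sketch:
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		... read tb_to_xs, stamp_xsec, etc. ...
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 */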

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
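	/*
	 * For example, a hypothetical tb_ticks_per_sec of 512000000 makes
	 * 1e9 * 2^64 / tb_ticks_per_sec equal 1.953125 as a 64.64 value;
	 * one pass through the loop above halves it to the 0.64 fraction
	 * 0.9765625 with shift = 1, so sched_clock() computes
	 * mulhdu(ticks, tb_to_ns_scale) << 1 == ticks * 1.953125 ns.
	 */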
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
}


#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 and 1996 were leap years, 1900 was not, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
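
/*
 * Worked example: for 1 Jan 2000, lastYear = 1999 and leapsToDate =
 * 499 - 19 + 4 = 484, so day = 1999 * 365 + 484 + 0 + 1 = 730120,
 * and 730120 % 7 = 6, a Saturday (tm_wday counts 0 = Sunday).
 */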

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
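
/*
 * Note the conventions: to_tm() produces a full four-digit tm_year
 * (e.g. 1970) and a 1-based tm_mon, which is why update_persistent_clock()
 * above subtracts 1900 and 1 before calling ppc_md.set_rtc_time().
 * For example, to_tm(0, &tm) yields 1970-01-01 00:00:00 with
 * tm_wday = 4 (a Thursday).
 */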

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
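
/*
 * The dividend is handled as four base-2^32 digits (a:b:c:d) by
 * schoolbook long division, with do_div() producing each quotient
 * digit and remainder in turn.  Example: dividing 2^64
 * (dividend_high = 1, dividend_low = 0) by 5 gives result_high = 0
 * and result_low = 0x3333333333333333.
 */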

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so.
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

module_init(rtc_init);
1038