xref: /linux/arch/powerpc/kernel/time.c (revision a3a4a816b4b194c45d0217e8b9e08b2639802cda)
1 /*
2  * Common time routines among all ppc machines.
3  *
4  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5  * Paul Mackerras' version and mine for PReP and Pmac.
6  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8  *
9  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10  * to make clock more stable (2.4.0-test5). The only thing
11  * that this code assumes is that the timebases have been synchronized
12  * by firmware on SMP and are never stopped (so never sleep
13  * on SMP; nap and doze are OK).
14  *
15  * Speeded up do_gettimeofday by getting rid of references to
16  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17  *
18  * TODO (not necessarily in this file):
19  * - improve precision and reproducibility of timebase frequency
20  * measurement at boot time.
21  * - for astronomical applications: add a new function to get
22  * unambiguous timestamps even around leap seconds. This needs
23  * a new timestamp format and a good name.
24  *
25  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
26  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
27  *
28  *      This program is free software; you can redistribute it and/or
29  *      modify it under the terms of the GNU General Public License
30  *      as published by the Free Software Foundation; either version
31  *      2 of the License, or (at your option) any later version.
32  */
33 
34 #include <linux/errno.h>
35 #include <linux/export.h>
36 #include <linux/sched.h>
37 #include <linux/sched/clock.h>
38 #include <linux/kernel.h>
39 #include <linux/param.h>
40 #include <linux/string.h>
41 #include <linux/mm.h>
42 #include <linux/interrupt.h>
43 #include <linux/timex.h>
44 #include <linux/kernel_stat.h>
45 #include <linux/time.h>
46 #include <linux/clockchips.h>
47 #include <linux/init.h>
48 #include <linux/profile.h>
49 #include <linux/cpu.h>
50 #include <linux/security.h>
51 #include <linux/percpu.h>
52 #include <linux/rtc.h>
53 #include <linux/jiffies.h>
54 #include <linux/posix-timers.h>
55 #include <linux/irq.h>
56 #include <linux/delay.h>
57 #include <linux/irq_work.h>
58 #include <linux/clk-provider.h>
59 #include <linux/suspend.h>
61 #include <linux/sched/cputime.h>
62 #include <asm/trace.h>
63 
64 #include <asm/io.h>
65 #include <asm/processor.h>
66 #include <asm/nvram.h>
67 #include <asm/cache.h>
68 #include <asm/machdep.h>
69 #include <linux/uaccess.h>
70 #include <asm/time.h>
71 #include <asm/prom.h>
72 #include <asm/irq.h>
73 #include <asm/div64.h>
74 #include <asm/smp.h>
75 #include <asm/vdso_datapage.h>
76 #include <asm/firmware.h>
77 #include <asm/asm-prototypes.h>
78 
79 /* powerpc clocksource/clockevent code */
80 
82 #include <linux/timekeeper_internal.h>
83 
84 static u64 rtc_read(struct clocksource *);
85 static struct clocksource clocksource_rtc = {
86 	.name         = "rtc",
87 	.rating       = 400,
88 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
89 	.mask         = CLOCKSOURCE_MASK(64),
90 	.read         = rtc_read,
91 };
92 
93 static u64 timebase_read(struct clocksource *);
94 static struct clocksource clocksource_timebase = {
95 	.name         = "timebase",
96 	.rating       = 400,
97 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
98 	.mask         = CLOCKSOURCE_MASK(64),
99 	.read         = timebase_read,
100 };
101 
102 #define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
103 u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
104 
105 static int decrementer_set_next_event(unsigned long evt,
106 				      struct clock_event_device *dev);
107 static int decrementer_shutdown(struct clock_event_device *evt);
108 
109 struct clock_event_device decrementer_clockevent = {
110 	.name			= "decrementer",
111 	.rating			= 200,
112 	.irq			= 0,
113 	.set_next_event		= decrementer_set_next_event,
114 	.set_state_shutdown	= decrementer_shutdown,
115 	.tick_resume		= decrementer_shutdown,
116 	.features		= CLOCK_EVT_FEAT_ONESHOT |
117 				  CLOCK_EVT_FEAT_C3STOP,
118 };
119 EXPORT_SYMBOL(decrementer_clockevent);
120 
121 DEFINE_PER_CPU(u64, decrementers_next_tb);
122 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
123 
124 #define XSEC_PER_SEC (1024*1024)
125 
126 #ifdef CONFIG_PPC64
127 #define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
128 #else
129 /* compute ((xsec << 12) * max) >> 32 */
130 #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
131 #endif
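
/*
 * Illustrative worked example: an "xsec" is 1/XSEC_PER_SEC = 1/2^20 of
 * a second, and SCALE_XSEC rescales an xsec count to a range "max".
 * With xsec = 524288 (half a second) and max = 1000000:
 *
 *	SCALE_XSEC(524288, 1000000) = (524288 * 1000000) / 1048576
 *	                            = 500000
 *
 * i.e. half of the target range. The 32-bit variant computes the same
 * thing as ((524288 << 12) * 1000000) >> 32 = 500000 via mulhwu().
 */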
132 
133 unsigned long tb_ticks_per_jiffy;
134 unsigned long tb_ticks_per_usec = 100; /* sane default */
135 EXPORT_SYMBOL(tb_ticks_per_usec);
136 unsigned long tb_ticks_per_sec;
137 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
138 
139 DEFINE_SPINLOCK(rtc_lock);
140 EXPORT_SYMBOL_GPL(rtc_lock);
141 
142 static u64 tb_to_ns_scale __read_mostly;
143 static unsigned tb_to_ns_shift __read_mostly;
144 static u64 boot_tb __read_mostly;
145 
146 extern struct timezone sys_tz;
147 static long timezone_offset;
148 
149 unsigned long ppc_proc_freq;
150 EXPORT_SYMBOL_GPL(ppc_proc_freq);
151 unsigned long ppc_tb_freq;
152 EXPORT_SYMBOL_GPL(ppc_tb_freq);
153 
154 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
155 /*
156  * Factor for converting from cputime_t (timebase ticks) to
157  * microseconds. This is stored as 0.64 fixed-point binary fraction.
158  */
159 u64 __cputime_usec_factor;
160 EXPORT_SYMBOL(__cputime_usec_factor);
161 
162 #ifdef CONFIG_PPC_SPLPAR
163 void (*dtl_consumer)(struct dtl_entry *, u64);
164 #endif
165 
166 #ifdef CONFIG_PPC64
167 #define get_accounting(tsk)	(&get_paca()->accounting)
168 #else
169 #define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
170 #endif
171 
172 static void calc_cputime_factors(void)
173 {
174 	struct div_result res;
175 
176 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
177 	__cputime_usec_factor = res.result_low;
178 }
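
/*
 * Illustrative sketch of how the 0.64 factor would be applied: the
 * high 64 bits of the 128-bit product ticks * __cputime_usec_factor
 * give microseconds, e.g. with a hypothetical helper based on the
 * mulhdu() primitive used elsewhere in this file:
 *
 *	static inline u64 ticks_to_usec(u64 ticks)
 *	{
 *		return mulhdu(ticks, __cputime_usec_factor);
 *	}
 *
 * For tb_ticks_per_sec = 512000000 the factor is
 * 1000000 * 2^64 / 512000000 = 2^55, so ticks_to_usec(512000000)
 * = 1000000, i.e. one second's worth of ticks maps to one million
 * microseconds.
 */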
179 
180 /*
181  * Read the SPURR on systems that have it, otherwise the PURR,
182  * or if that doesn't exist return the timebase value passed in.
183  */
184 static unsigned long read_spurr(unsigned long tb)
185 {
186 	if (cpu_has_feature(CPU_FTR_SPURR))
187 		return mfspr(SPRN_SPURR);
188 	if (cpu_has_feature(CPU_FTR_PURR))
189 		return mfspr(SPRN_PURR);
190 	return tb;
191 }
192 
193 #ifdef CONFIG_PPC_SPLPAR
194 
195 /*
196  * Scan the dispatch trace log and count up the stolen time.
197  * Should be called with interrupts disabled.
198  */
199 static u64 scan_dispatch_log(u64 stop_tb)
200 {
201 	u64 i = local_paca->dtl_ridx;
202 	struct dtl_entry *dtl = local_paca->dtl_curr;
203 	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
204 	struct lppaca *vpa = local_paca->lppaca_ptr;
205 	u64 tb_delta;
206 	u64 stolen = 0;
207 	u64 dtb;
208 
209 	if (!dtl)
210 		return 0;
211 
212 	if (i == be64_to_cpu(vpa->dtl_idx))
213 		return 0;
214 	while (i < be64_to_cpu(vpa->dtl_idx)) {
215 		dtb = be64_to_cpu(dtl->timebase);
216 		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
217 			be32_to_cpu(dtl->ready_to_enqueue_time);
218 		barrier();
219 		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
220 			/* buffer has overflowed */
221 			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
222 			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
223 			continue;
224 		}
225 		if (dtb > stop_tb)
226 			break;
227 		if (dtl_consumer)
228 			dtl_consumer(dtl, i);
229 		stolen += tb_delta;
230 		++i;
231 		++dtl;
232 		if (dtl == dtl_end)
233 			dtl = local_paca->dispatch_log;
234 	}
235 	local_paca->dtl_ridx = i;
236 	local_paca->dtl_curr = dtl;
237 	return stolen;
238 }
239 
240 /*
241  * Accumulate stolen time by scanning the dispatch trace log.
242  * Called on entry from user mode.
243  */
244 void accumulate_stolen_time(void)
245 {
246 	u64 sst, ust;
247 	u8 save_soft_enabled = local_paca->soft_enabled;
248 	struct cpu_accounting_data *acct = &local_paca->accounting;
249 
250 	/* We are called early in the exception entry, before
251 	 * soft/hard_enabled are sync'ed to the expected state
252 	 * for the exception. We are hard disabled but the PACA
253 	 * needs to reflect that so various debug stuff doesn't
254 	 * complain.
255 	 */
256 	local_paca->soft_enabled = 0;
257 
258 	sst = scan_dispatch_log(acct->starttime_user);
259 	ust = scan_dispatch_log(acct->starttime);
260 	acct->stime -= sst;
261 	acct->utime -= ust;
262 	acct->steal_time += ust + sst;
263 
264 	local_paca->soft_enabled = save_soft_enabled;
265 }
266 
267 static inline u64 calculate_stolen_time(u64 stop_tb)
268 {
269 	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
270 		return scan_dispatch_log(stop_tb);
271 
272 	return 0;
273 }
274 
275 #else /* CONFIG_PPC_SPLPAR */
276 static inline u64 calculate_stolen_time(u64 stop_tb)
277 {
278 	return 0;
279 }
280 
281 #endif /* CONFIG_PPC_SPLPAR */
282 
283 /*
284  * Account time for a transition between system, hard irq
285  * or soft irq state.
286  */
287 static unsigned long vtime_delta(struct task_struct *tsk,
288 				 unsigned long *stime_scaled,
289 				 unsigned long *steal_time)
290 {
291 	unsigned long now, nowscaled, deltascaled;
292 	unsigned long stime;
293 	unsigned long utime, utime_scaled;
294 	struct cpu_accounting_data *acct = get_accounting(tsk);
295 
296 	WARN_ON_ONCE(!irqs_disabled());
297 
298 	now = mftb();
299 	nowscaled = read_spurr(now);
300 	stime = now - acct->starttime;
301 	acct->starttime = now;
302 	deltascaled = nowscaled - acct->startspurr;
303 	acct->startspurr = nowscaled;
304 
305 	*steal_time = calculate_stolen_time(now);
306 
307 	utime = acct->utime - acct->utime_sspurr;
308 	acct->utime_sspurr = acct->utime;
309 
310 	/*
311 	 * Because we don't read the SPURR on every kernel entry/exit,
312 	 * deltascaled includes both user and system SPURR ticks.
313 	 * Apportion these ticks to system SPURR ticks and user
314 	 * SPURR ticks in the same ratio as the system time (delta)
315 	 * and user time (udelta) values obtained from the timebase
316 	 * over the same interval.  The system ticks get accounted here;
317 	 * the user ticks get saved up in acct->utime_scaled to be
318 	 * used by vtime_flush().
319 	 */
320 	*stime_scaled = stime;
321 	utime_scaled = utime;
322 	if (deltascaled != stime + utime) {
323 		if (utime) {
324 			*stime_scaled = deltascaled * stime / (stime + utime);
325 			utime_scaled = deltascaled - *stime_scaled;
326 		} else {
327 			*stime_scaled = deltascaled;
328 		}
329 	}
330 	acct->utime_scaled += utime_scaled;
331 
332 	return stime;
333 }
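
/*
 * Illustrative worked example of the apportioning above: suppose the
 * timebase gives stime = 300 and utime = 100 ticks over an interval,
 * but the SPURR only advanced by deltascaled = 200 (the thread ran at
 * reduced speed). The SPURR ticks are split in the stime:utime ratio:
 *
 *	stime_scaled = 200 * 300 / (300 + 100) = 150
 *	utime_scaled = 200 - 150               = 50
 *
 * so the scaled system and user times still sum to the SPURR delta.
 */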
334 
335 void vtime_account_system(struct task_struct *tsk)
336 {
337 	unsigned long stime, stime_scaled, steal_time;
338 	struct cpu_accounting_data *acct = get_accounting(tsk);
339 
340 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
341 
342 	stime -= min(stime, steal_time);
343 	acct->steal_time += steal_time;
344 
345 	if ((tsk->flags & PF_VCPU) && !irq_count()) {
346 		acct->gtime += stime;
347 		acct->utime_scaled += stime_scaled;
348 	} else {
349 		if (hardirq_count())
350 			acct->hardirq_time += stime;
351 		else if (in_serving_softirq())
352 			acct->softirq_time += stime;
353 		else
354 			acct->stime += stime;
355 
356 		acct->stime_scaled += stime_scaled;
357 	}
358 }
359 EXPORT_SYMBOL_GPL(vtime_account_system);
360 
361 void vtime_account_idle(struct task_struct *tsk)
362 {
363 	unsigned long stime, stime_scaled, steal_time;
364 	struct cpu_accounting_data *acct = get_accounting(tsk);
365 
366 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
367 	acct->idle_time += stime + steal_time;
368 }
369 
370 /*
371  * Account the whole cputime accumulated in the paca.
372  * Must be called with interrupts disabled.
373  * Assumes that vtime_account_system/idle() has been called
374  * recently (i.e. since the last entry from usermode) so that
375  * get_accounting(tsk)->utime_scaled is up to date.
376  */
377 void vtime_flush(struct task_struct *tsk)
378 {
379 	struct cpu_accounting_data *acct = get_accounting(tsk);
380 
381 	if (acct->utime)
382 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
383 
384 	if (acct->utime_scaled)
385 		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
386 
387 	if (acct->gtime)
388 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
389 
390 	if (acct->steal_time)
391 		account_steal_time(cputime_to_nsecs(acct->steal_time));
392 
393 	if (acct->idle_time)
394 		account_idle_time(cputime_to_nsecs(acct->idle_time));
395 
396 	if (acct->stime)
397 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
398 					  CPUTIME_SYSTEM);
399 	if (acct->stime_scaled)
400 		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
401 
402 	if (acct->hardirq_time)
403 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
404 					  CPUTIME_IRQ);
405 	if (acct->softirq_time)
406 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
407 					  CPUTIME_SOFTIRQ);
408 
409 	acct->utime = 0;
410 	acct->utime_scaled = 0;
411 	acct->utime_sspurr = 0;
412 	acct->gtime = 0;
413 	acct->steal_time = 0;
414 	acct->idle_time = 0;
415 	acct->stime = 0;
416 	acct->stime_scaled = 0;
417 	acct->hardirq_time = 0;
418 	acct->softirq_time = 0;
419 }
420 
421 #ifdef CONFIG_PPC32
422 /*
423  * Called from the context switch with interrupts disabled, to charge all
424  * accumulated times to the current process, and to prepare accounting
425  * for the next process.
426  */
427 void arch_vtime_task_switch(struct task_struct *prev)
428 {
429 	struct cpu_accounting_data *acct = get_accounting(current);
430 
431 	acct->starttime = get_accounting(prev)->starttime;
432 	acct->startspurr = get_accounting(prev)->startspurr;
433 }
434 #endif /* CONFIG_PPC32 */
435 
436 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
437 #define calc_cputime_factors()
438 #endif
439 
440 void __delay(unsigned long loops)
441 {
442 	unsigned long start;
443 	int diff;
444 
445 	if (__USE_RTC()) {
446 		start = get_rtcl();
447 		do {
448 			/* the RTCL register wraps at 1000000000 */
449 			diff = get_rtcl() - start;
450 			if (diff < 0)
451 				diff += 1000000000;
452 		} while (diff < loops);
453 	} else {
454 		start = get_tbl();
455 		while (get_tbl() - start < loops)
456 			HMT_low();
457 		HMT_medium();
458 	}
459 }
460 EXPORT_SYMBOL(__delay);
461 
462 void udelay(unsigned long usecs)
463 {
464 	__delay(tb_ticks_per_usec * usecs);
465 }
466 EXPORT_SYMBOL(udelay);
467 
468 #ifdef CONFIG_SMP
469 unsigned long profile_pc(struct pt_regs *regs)
470 {
471 	unsigned long pc = instruction_pointer(regs);
472 
473 	if (in_lock_functions(pc))
474 		return regs->link;
475 
476 	return pc;
477 }
478 EXPORT_SYMBOL(profile_pc);
479 #endif
480 
481 #ifdef CONFIG_IRQ_WORK
482 
483 /*
484  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
485  */
486 #ifdef CONFIG_PPC64
487 static inline unsigned long test_irq_work_pending(void)
488 {
489 	unsigned long x;
490 
491 	asm volatile("lbz %0,%1(13)"
492 		: "=r" (x)
493 		: "i" (offsetof(struct paca_struct, irq_work_pending)));
494 	return x;
495 }
496 
497 static inline void set_irq_work_pending_flag(void)
498 {
499 	asm volatile("stb %0,%1(13)" : :
500 		"r" (1),
501 		"i" (offsetof(struct paca_struct, irq_work_pending)));
502 }
503 
504 static inline void clear_irq_work_pending(void)
505 {
506 	asm volatile("stb %0,%1(13)" : :
507 		"r" (0),
508 		"i" (offsetof(struct paca_struct, irq_work_pending)));
509 }
510 
511 #else /* 32-bit */
512 
513 DEFINE_PER_CPU(u8, irq_work_pending);
514 
515 #define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
516 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
517 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
518 
519 #endif /* 32 vs 64 bit */
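
/*
 * Illustrative sketch: on 64-bit, r13 holds the PACA pointer, so the
 * lbz/stb asm above amounts to the following (hypothetical) C, kept
 * in asm so each access is a single load or store off r13:
 *
 *	local_paca->irq_work_pending = 1;	// set_irq_work_pending_flag()
 *	x = local_paca->irq_work_pending;	// test_irq_work_pending()
 */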
520 
521 void arch_irq_work_raise(void)
522 {
523 	preempt_disable();
524 	set_irq_work_pending_flag();
525 	set_dec(1);
526 	preempt_enable();
527 }
528 
529 #else  /* CONFIG_IRQ_WORK */
530 
531 #define test_irq_work_pending()	0
532 #define clear_irq_work_pending()
533 
534 #endif /* CONFIG_IRQ_WORK */
535 
536 static void __timer_interrupt(void)
537 {
538 	struct pt_regs *regs = get_irq_regs();
539 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
540 	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
541 	u64 now;
542 
543 	trace_timer_interrupt_entry(regs);
544 
545 	if (test_irq_work_pending()) {
546 		clear_irq_work_pending();
547 		irq_work_run();
548 	}
549 
550 	now = get_tb_or_rtc();
551 	if (now >= *next_tb) {
552 		*next_tb = ~(u64)0;
553 		if (evt->event_handler)
554 			evt->event_handler(evt);
555 		__this_cpu_inc(irq_stat.timer_irqs_event);
556 	} else {
557 		now = *next_tb - now;
558 		if (now <= decrementer_max)
559 			set_dec(now);
560 		/* We may have raced with new irq work */
561 		if (test_irq_work_pending())
562 			set_dec(1);
563 		__this_cpu_inc(irq_stat.timer_irqs_others);
564 	}
565 
566 #ifdef CONFIG_PPC64
567 	/* collect purr register values often, for accurate calculations */
568 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
569 		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
570 		cu->current_tb = mfspr(SPRN_PURR);
571 	}
572 #endif
573 
574 	trace_timer_interrupt_exit(regs);
575 }
576 
577 /*
578  * timer_interrupt - gets called when the decrementer overflows,
579  * with interrupts disabled.
580  */
581 void timer_interrupt(struct pt_regs * regs)
582 {
583 	struct pt_regs *old_regs;
584 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
585 
586 	/* Ensure a positive value is written to the decrementer, or else
587 	 * some CPUs will continue to take decrementer exceptions.
588 	 */
589 	set_dec(decrementer_max);
590 
591 	/* Some implementations of hotplug will get timer interrupts while
592 	 * offline, just ignore these. We also need to set
593 	 * decrementers_next_tb to MAX so that __check_irq_replay
594 	 * doesn't replay the timer interrupt on return, otherwise we'll
595 	 * trap here infinitely :(
596 	 */
597 	if (!cpu_online(smp_processor_id())) {
598 		*next_tb = ~(u64)0;
599 		return;
600 	}
601 
602 	/* Conditionally hard-enable interrupts now that the DEC has been
603 	 * bumped to its maximum value
604 	 */
605 	may_hard_irq_enable();
606 
608 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
609 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
610 		do_IRQ(regs);
611 #endif
612 
613 	old_regs = set_irq_regs(regs);
614 	irq_enter();
615 
616 	__timer_interrupt();
617 	irq_exit();
618 	set_irq_regs(old_regs);
619 }
620 EXPORT_SYMBOL(timer_interrupt);
621 
622 /*
623  * Hypervisor decrementer interrupts shouldn't occur but are sometimes
624  * left pending on exit from a KVM guest.  We don't need to do anything
625  * to clear them, as they are edge-triggered.
626  */
627 void hdec_interrupt(struct pt_regs *regs)
628 {
629 }
630 
631 #ifdef CONFIG_SUSPEND
632 static void generic_suspend_disable_irqs(void)
633 {
634 	/* Disable the decrementer, so that it doesn't interfere
635 	 * with suspending.
636 	 */
637 
638 	set_dec(decrementer_max);
639 	local_irq_disable();
640 	set_dec(decrementer_max);
641 }
642 
643 static void generic_suspend_enable_irqs(void)
644 {
645 	local_irq_enable();
646 }
647 
648 /* Overrides the weak version in kernel/power/main.c */
649 void arch_suspend_disable_irqs(void)
650 {
651 	if (ppc_md.suspend_disable_irqs)
652 		ppc_md.suspend_disable_irqs();
653 	generic_suspend_disable_irqs();
654 }
655 
656 /* Overrides the weak version in kernel/power/main.c */
657 void arch_suspend_enable_irqs(void)
658 {
659 	generic_suspend_enable_irqs();
660 	if (ppc_md.suspend_enable_irqs)
661 		ppc_md.suspend_enable_irqs();
662 }
663 #endif
664 
665 unsigned long long tb_to_ns(unsigned long long ticks)
666 {
667 	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
668 }
669 EXPORT_SYMBOL_GPL(tb_to_ns);
670 
671 /*
672  * Scheduler clock - returns current time in nanosec units.
673  *
674  * Note: mulhdu(a, b) (multiply high double unsigned) returns
675  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
676  * are 64-bit unsigned numbers.
677  */
678 unsigned long long sched_clock(void)
679 {
680 	if (__USE_RTC())
681 		return get_rtc();
682 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
683 }
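
/*
 * Illustrative worked example, assuming a 512 MHz timebase: each tick
 * is 1.953125 ns, so time_init() ends up with tb_to_ns_shift = 1 and
 * tb_to_ns_scale ~= 0.9765625 * 2^64. For one second's worth of ticks:
 *
 *	mulhdu(512000000, tb_to_ns_scale) << 1 ~= 1000000000
 *
 * i.e. sched_clock() returns one second in nanoseconds, as expected.
 */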
684 
685 
686 #ifdef CONFIG_PPC_PSERIES
687 
688 /*
689  * Running clock - attempts to give a view of time passing for a virtualised
690  * kernel.
691  * Uses the VTB register if available, otherwise the next best guess.
692  */
693 unsigned long long running_clock(void)
694 {
695 	/*
696 	 * Don't read the VTB as a host, since KVM does not switch the host
697 	 * timebase into the VTB when it takes a guest off the CPU; reading the
698 	 * VTB would result in reading the 'last switched out' guest's VTB.
699 	 *
700 	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled, so
701 	 * it would be unsafe to rely only on the #ifdef above.
702 	 */
703 	if (firmware_has_feature(FW_FEATURE_LPAR) &&
704 	    cpu_has_feature(CPU_FTR_ARCH_207S))
705 		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
706 
707 	/*
708 	 * This is the next best approximation without a VTB.
709 	 * On a host running bare metal there should never be any stolen
710 	 * time, and on a host which doesn't do any virtualisation the TB
711 	 * *should* equal the VTB, so it makes no difference anyway.
712 	 */
713 	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
714 }
715 #endif
716 
717 static int __init get_freq(char *name, int cells, unsigned long *val)
718 {
719 	struct device_node *cpu;
720 	const __be32 *fp;
721 	int found = 0;
722 
723 	/* The cpu node should have timebase and clock frequency properties */
724 	cpu = of_find_node_by_type(NULL, "cpu");
725 
726 	if (cpu) {
727 		fp = of_get_property(cpu, name, NULL);
728 		if (fp) {
729 			found = 1;
730 			*val = of_read_ulong(fp, cells);
731 		}
732 
733 		of_node_put(cpu);
734 	}
735 
736 	return found;
737 }
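
/*
 * Illustrative device tree fragment with the properties read above
 * (the values are made up):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		timebase-frequency = <512000000>;
 *		clock-frequency = <1600000000>;
 *	};
 *
 * The "ibm,extended-*" variants are two-cell (64-bit) versions for
 * values that may not fit in 32 bits, hence cells = 2 for those.
 */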
738 
739 static void start_cpu_decrementer(void)
740 {
741 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
742 	/* Clear any pending timer interrupts */
743 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
744 
745 	/* Enable decrementer interrupt */
746 	mtspr(SPRN_TCR, TCR_DIE);
747 #endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
748 }
749 
750 void __init generic_calibrate_decr(void)
751 {
752 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
753 
754 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
755 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
756 
757 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
758 				"(not found)\n");
759 	}
760 
761 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
762 
763 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
764 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
765 
766 		printk(KERN_ERR "WARNING: Estimating processor frequency "
767 				"(not found)\n");
768 	}
769 }
770 
771 int update_persistent_clock(struct timespec now)
772 {
773 	struct rtc_time tm;
774 
775 	if (!ppc_md.set_rtc_time)
776 		return -ENODEV;
777 
778 	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
779 	tm.tm_year -= 1900;
780 	tm.tm_mon -= 1;
781 
782 	return ppc_md.set_rtc_time(&tm);
783 }
784 
785 static void __read_persistent_clock(struct timespec *ts)
786 {
787 	struct rtc_time tm;
788 	static int first = 1;
789 
790 	ts->tv_nsec = 0;
791 	/* XXX this is a little fragile but will work okay in the short term */
792 	if (first) {
793 		first = 0;
794 		if (ppc_md.time_init)
795 			timezone_offset = ppc_md.time_init();
796 
797 		/* get_boot_time() isn't guaranteed to be safe to call late */
798 		if (ppc_md.get_boot_time) {
799 			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
800 			return;
801 		}
802 	}
803 	if (!ppc_md.get_rtc_time) {
804 		ts->tv_sec = 0;
805 		return;
806 	}
807 	ppc_md.get_rtc_time(&tm);
808 
809 	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
810 			    tm.tm_hour, tm.tm_min, tm.tm_sec);
811 }
812 
813 void read_persistent_clock(struct timespec *ts)
814 {
815 	__read_persistent_clock(ts);
816 
817 	/* Sanitize it in case the real time clock is set below the EPOCH */
818 	if (ts->tv_sec < 0) {
819 		ts->tv_sec = 0;
820 		ts->tv_nsec = 0;
821 	}
822 
823 }
824 
825 /* clocksource code */
826 static u64 rtc_read(struct clocksource *cs)
827 {
828 	return (u64)get_rtc();
829 }
830 
831 static u64 timebase_read(struct clocksource *cs)
832 {
833 	return (u64)get_tb();
834 }
835 
836 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
837 			 struct clocksource *clock, u32 mult, u64 cycle_last)
838 {
839 	u64 new_tb_to_xs, new_stamp_xsec;
840 	u32 frac_sec;
841 
842 	if (clock != &clocksource_timebase)
843 		return;
844 
845 	/* Make userspace gettimeofday spin until we're done. */
846 	++vdso_data->tb_update_count;
847 	smp_mb();
848 
849 	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
850 	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
851 	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
852 	do_div(new_stamp_xsec, 1000000000);
853 	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
854 
855 	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
856 	/* this is tv_nsec / 1e9 as a 0.32 fraction */
857 	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
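
	/*
	 * Illustrative check: for tv_nsec = 500000000 this gives
	 * frac_sec = (500000000 * 18446744073) >> 32 ~= 0x80000000,
	 * i.e. one half as a 0.32 binary fraction.
	 */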
858 
859 	/*
860 	 * tb_update_count is used to allow the userspace gettimeofday code
861 	 * to assure itself that it sees a consistent view of the tb_to_xs and
862 	 * stamp_xsec variables.  It reads the tb_update_count, then reads
863 	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
864 	 * the two values of tb_update_count match and are even then the
865 	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
866 	 * loops back and reads them again until this criterion is met.
867 	 * We expect the caller to have done the first increment of
868 	 * vdso_data->tb_update_count already.
869 	 */
870 	vdso_data->tb_orig_stamp = cycle_last;
871 	vdso_data->stamp_xsec = new_stamp_xsec;
872 	vdso_data->tb_to_xs = new_tb_to_xs;
873 	vdso_data->wtom_clock_sec = wtm->tv_sec;
874 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
875 	vdso_data->stamp_xtime = *wall_time;
876 	vdso_data->stamp_sec_fraction = frac_sec;
877 	smp_wmb();
878 	++(vdso_data->tb_update_count);
879 }
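
/*
 * Illustrative sketch of the userspace side of the protocol described
 * above (pseudocode; the real reader lives in the VDSO):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, tb_to_xs, stamp_xsec ...
 *		smp_rmb();
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 *
 * An odd count means an update is in flight, so the reader retries.
 */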
880 
881 void update_vsyscall_tz(void)
882 {
883 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
884 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
885 }
886 
887 static void __init clocksource_init(void)
888 {
889 	struct clocksource *clock;
890 
891 	if (__USE_RTC())
892 		clock = &clocksource_rtc;
893 	else
894 		clock = &clocksource_timebase;
895 
896 	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
897 		printk(KERN_ERR "clocksource: %s is already registered\n",
898 		       clock->name);
899 		return;
900 	}
901 
902 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
903 	       clock->name, clock->mult, clock->shift);
904 }
905 
906 static int decrementer_set_next_event(unsigned long evt,
907 				      struct clock_event_device *dev)
908 {
909 	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
910 	set_dec(evt);
911 
912 	/* We may have raced with new irq work */
913 	if (test_irq_work_pending())
914 		set_dec(1);
915 
916 	return 0;
917 }
918 
919 static int decrementer_shutdown(struct clock_event_device *dev)
920 {
921 	decrementer_set_next_event(decrementer_max, dev);
922 	return 0;
923 }
924 
925 /* Interrupt handler for the timer broadcast IPI */
926 void tick_broadcast_ipi_handler(void)
927 {
928 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
929 
930 	*next_tb = get_tb_or_rtc();
931 	__timer_interrupt();
932 }
933 
934 static void register_decrementer_clockevent(int cpu)
935 {
936 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
937 
938 	*dec = decrementer_clockevent;
939 	dec->cpumask = cpumask_of(cpu);
940 
941 	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
942 		    dec->name, dec->mult, dec->shift, cpu);
943 
944 	clockevents_register_device(dec);
945 }
946 
947 static void enable_large_decrementer(void)
948 {
949 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
950 		return;
951 
952 	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
953 		return;
954 
955 	/*
956 	 * If we're running as the hypervisor we need to enable the LD manually,
957 	 * otherwise firmware should have done it for us.
958 	 */
959 	if (cpu_has_feature(CPU_FTR_HVMODE))
960 		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
961 }
962 
963 static void __init set_decrementer_max(void)
964 {
965 	struct device_node *cpu;
966 	u32 bits = 32;
967 
968 	/* Prior to ISAv3 the decrementer is always 32 bit */
969 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
970 		return;
971 
972 	cpu = of_find_node_by_type(NULL, "cpu");
973 
974 	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
975 		if (bits > 64 || bits < 32) {
976 			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
977 			bits = 32;
978 		}
979 
980 		/* calculate the signed maximum given this many bits */
981 		decrementer_max = (1ul << (bits - 1)) - 1;
982 	}
983 
984 	of_node_put(cpu);
985 
986 	pr_info("time_init: %u bit decrementer (max: %llx)\n",
987 		bits, decrementer_max);
988 }
989 
990 static void __init init_decrementer_clockevent(void)
991 {
992 	int cpu = smp_processor_id();
993 
994 	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
995 
996 	decrementer_clockevent.max_delta_ns =
997 		clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
998 	decrementer_clockevent.min_delta_ns =
999 		clockevent_delta2ns(2, &decrementer_clockevent);
1000 
1001 	register_decrementer_clockevent(cpu);
1002 }
1003 
1004 void secondary_cpu_time_init(void)
1005 {
1006 	/* Enable and test the large decrementer for this cpu */
1007 	enable_large_decrementer();
1008 
1009 	/* Start the decrementer on CPUs that have manual control
1010 	 * such as BookE
1011 	 */
1012 	start_cpu_decrementer();
1013 
1014 	/* FIXME: Should make an unrelated change to move the
1015 	 * snapshot_timebase call here! */
1016 	register_decrementer_clockevent(smp_processor_id());
1017 }
1018 
1019 /* This function is only called on the boot processor */
1020 void __init time_init(void)
1021 {
1022 	struct div_result res;
1023 	u64 scale;
1024 	unsigned shift;
1025 
1026 	if (__USE_RTC()) {
1027 		/* 601 processor: dec counts down by 128 every 128ns */
1028 		ppc_tb_freq = 1000000000;
1029 	} else {
1030 		/* Normal PowerPC with timebase register */
1031 		ppc_md.calibrate_decr();
1032 		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
1033 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
1034 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
1035 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
1036 	}
1037 
1038 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
1039 	tb_ticks_per_sec = ppc_tb_freq;
1040 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
1041 	calc_cputime_factors();
1042 
1043 	/*
1044 	 * Compute scale factor for sched_clock.
1045 	 * The calibrate_decr() function has set tb_ticks_per_sec,
1046 	 * which is the timebase frequency.
1047 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1048 	 * the 128-bit result as a 64.64 fixed-point number.
1049 	 * We then shift that number right until it is less than 1.0,
1050 	 * giving us the scale factor and shift count to use in
1051 	 * sched_clock().
1052 	 */
1053 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1054 	scale = res.result_low;
1055 	for (shift = 0; res.result_high != 0; ++shift) {
1056 		scale = (scale >> 1) | (res.result_high << 63);
1057 		res.result_high >>= 1;
1058 	}
1059 	tb_to_ns_scale = scale;
1060 	tb_to_ns_shift = shift;
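	/*
	 * Illustrative worked example: for tb_ticks_per_sec = 512000000,
	 * 1e9 * 2^64 / tb_ticks_per_sec is ~1.953125 * 2^64, so
	 * res.result_high = 1. One pass of the loop halves that below
	 * 1.0, leaving shift = 1 and scale ~= 0.9765625 * 2^64, which
	 * sched_clock() undoes with "<< tb_to_ns_shift".
	 */
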
1061 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1062 	boot_tb = get_tb_or_rtc();
1063 
1064 	/* If platform provided a timezone (pmac), we correct the time */
1065 	if (timezone_offset) {
1066 		sys_tz.tz_minuteswest = -timezone_offset / 60;
1067 		sys_tz.tz_dsttime = 0;
1068 	}
1069 
1070 	vdso_data->tb_update_count = 0;
1071 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1072 
1073 	/* initialise and enable the large decrementer (if we have one) */
1074 	set_decrementer_max();
1075 	enable_large_decrementer();
1076 
1077 	/* Start the decrementer on CPUs that have manual control
1078 	 * such as BookE
1079 	 */
1080 	start_cpu_decrementer();
1081 
1082 	/* Register the clocksource */
1083 	clocksource_init();
1084 
1085 	init_decrementer_clockevent();
1086 	tick_setup_hrtimer_broadcast();
1087 
1088 #ifdef CONFIG_COMMON_CLK
1089 	of_clk_init(NULL);
1090 #endif
1091 }
1092 
1093 
1094 #define FEBRUARY	2
1095 #define	STARTOFTIME	1970
1096 #define SECDAY		86400L
1097 #define SECYR		(SECDAY * 365)
1098 #define	leapyear(year)		((year) % 4 == 0 && \
1099 				 ((year) % 100 != 0 || (year) % 400 == 0))
1100 #define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
1101 #define	days_in_month(a) 	(month_days[(a) - 1])
1102 
1103 static int month_days[12] = {
1104 	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1105 };
1106 
1107 void to_tm(int tim, struct rtc_time * tm)
1108 {
1109 	register int    i;
1110 	register long   hms, day;
1111 
1112 	day = tim / SECDAY;
1113 	hms = tim % SECDAY;
1114 
1115 	/* Hours, minutes, seconds are easy */
1116 	tm->tm_hour = hms / 3600;
1117 	tm->tm_min = (hms % 3600) / 60;
1118 	tm->tm_sec = (hms % 3600) % 60;
1119 
1120 	/* Number of years in days */
1121 	for (i = STARTOFTIME; day >= days_in_year(i); i++)
1122 		day -= days_in_year(i);
1123 	tm->tm_year = i;
1124 
1125 	/* Number of months in days left */
1126 	if (leapyear(tm->tm_year))
1127 		days_in_month(FEBRUARY) = 29;
1128 	for (i = 1; day >= days_in_month(i); i++)
1129 		day -= days_in_month(i);
1130 	days_in_month(FEBRUARY) = 28;
1131 	tm->tm_mon = i;
1132 
1133 	/* Days are what is left over (+1) from all that. */
1134 	tm->tm_mday = day + 1;
1135 
1136 	/*
1137 	 * No-one uses the day of the week.
1138 	 */
1139 	tm->tm_wday = -1;
1140 }
1141 EXPORT_SYMBOL(to_tm);
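
/*
 * Illustrative example: to_tm(86399, &tm), one second before the end
 * of the epoch's first day, yields tm_year = 1970, tm_mon = 1,
 * tm_mday = 1, tm_hour = 23, tm_min = 59, tm_sec = 59. Note tm_year
 * and tm_mon are not offset the way struct tm's fields are, which is
 * why update_persistent_clock() above subtracts 1900 and 1.
 */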
1142 
1143 /*
1144  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
1145  * result.
1146  */
1147 void div128_by_32(u64 dividend_high, u64 dividend_low,
1148 		  unsigned divisor, struct div_result *dr)
1149 {
1150 	unsigned long a, b, c, d;
1151 	unsigned long w, x, y, z;
1152 	u64 ra, rb, rc;
1153 
1154 	a = dividend_high >> 32;
1155 	b = dividend_high & 0xffffffff;
1156 	c = dividend_low >> 32;
1157 	d = dividend_low & 0xffffffff;
1158 
1159 	w = a / divisor;
1160 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1161 
1162 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1163 	x = ra;
1164 
1165 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1166 	y = rb;
1167 
1168 	do_div(rc, divisor);
1169 	z = rc;
1170 
1171 	dr->result_high = ((u64)w << 32) + x;
1172 	dr->result_low  = ((u64)y << 32) + z;
1173 
1174 }
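
/*
 * Illustrative check: the routine above is schoolbook long division in
 * base 2^32. Dividing dividend_high = 10, dividend_low = 0 (i.e.
 * 10 * 2^64) by divisor = 4 yields result_high = 2 and
 * result_low = 2^63 (i.e. 2.5 * 2^64), which is exactly how
 * time_init() obtains 64.64 fixed-point ratios such as
 * 1e9 * 2^64 / tb_ticks_per_sec.
 */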
1175 
1176 /* We don't need to calibrate delay; we use the CPU timebase for that */
1177 void calibrate_delay(void)
1178 {
1179 	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
1180 	 * as the number of __delay(1) calls in a jiffy, so make it so.
1181 	 */
1182 	loops_per_jiffy = tb_ticks_per_jiffy;
1183 }
1184 
1185 #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
1186 static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
1187 {
1188 	ppc_md.get_rtc_time(tm);
1189 	return rtc_valid_tm(tm);
1190 }
1191 
1192 static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
1193 {
1194 	if (!ppc_md.set_rtc_time)
1195 		return -EOPNOTSUPP;
1196 
1197 	if (ppc_md.set_rtc_time(tm) < 0)
1198 		return -EOPNOTSUPP;
1199 
1200 	return 0;
1201 }
1202 
1203 static const struct rtc_class_ops rtc_generic_ops = {
1204 	.read_time = rtc_generic_get_time,
1205 	.set_time = rtc_generic_set_time,
1206 };
1207 
1208 static int __init rtc_init(void)
1209 {
1210 	struct platform_device *pdev;
1211 
1212 	if (!ppc_md.get_rtc_time)
1213 		return -ENODEV;
1214 
1215 	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1216 					     &rtc_generic_ops,
1217 					     sizeof(rtc_generic_ops));
1218 
1219 	return PTR_ERR_OR_ZERO(pdev);
1220 }
1221 
1222 device_initcall(rtc_init);
1223 #endif
1224