xref: /linux/arch/powerpc/kernel/time.c (revision 9581991a60817abe311c2581ae4554b28bfa32f1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Common time routines among all ppc machines.
4  *
5  * Written by Cort Dougan (cort@cs.nmt.edu) to merge
6  * Paul Mackerras' version and mine for PReP and Pmac.
7  * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
8  * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
9  *
10  * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
11  * to make clock more stable (2.4.0-test5). The only thing
12  * that this code assumes is that the timebases have been synchronized
13  * by firmware on SMP and are never stopped (never do sleep
14  * on SMP then, nap and doze are OK).
15  *
16  * Speeded up do_gettimeofday by getting rid of references to
17  * xtime (which required locks for consistency). (mikejc@us.ibm.com)
18  *
19  * TODO (not necessarily in this file):
20  * - improve precision and reproducibility of timebase frequency
21  * measurement at boot time.
22  * - for astronomical applications: add a new function to get
 23  * unambiguous timestamps even around leap seconds. This needs
24  * a new timestamp format and a good name.
25  *
26  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
27  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
28  */
29 
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/sched.h>
33 #include <linux/sched/clock.h>
34 #include <linux/sched/cputime.h>
35 #include <linux/kernel.h>
36 #include <linux/param.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/interrupt.h>
40 #include <linux/timex.h>
41 #include <linux/kernel_stat.h>
42 #include <linux/time.h>
43 #include <linux/init.h>
44 #include <linux/profile.h>
45 #include <linux/cpu.h>
46 #include <linux/security.h>
47 #include <linux/percpu.h>
48 #include <linux/rtc.h>
49 #include <linux/jiffies.h>
50 #include <linux/posix-timers.h>
51 #include <linux/irq.h>
52 #include <linux/delay.h>
53 #include <linux/irq_work.h>
54 #include <linux/of_clk.h>
55 #include <linux/suspend.h>
56 #include <linux/processor.h>
57 #include <asm/trace.h>
58 
59 #include <asm/interrupt.h>
60 #include <asm/io.h>
61 #include <asm/nvram.h>
62 #include <asm/cache.h>
63 #include <asm/machdep.h>
64 #include <linux/uaccess.h>
65 #include <asm/time.h>
66 #include <asm/prom.h>
67 #include <asm/irq.h>
68 #include <asm/div64.h>
69 #include <asm/smp.h>
70 #include <asm/vdso_datapage.h>
71 #include <asm/firmware.h>
72 #include <asm/asm-prototypes.h>
73 
74 /* powerpc clocksource/clockevent code */
75 
76 #include <linux/clockchips.h>
77 #include <linux/timekeeper_internal.h>
78 
79 static u64 timebase_read(struct clocksource *);
80 static struct clocksource clocksource_timebase = {
81 	.name         = "timebase",
82 	.rating       = 400,
83 	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
84 	.mask         = CLOCKSOURCE_MASK(64),
85 	.read         = timebase_read,
86 	.vdso_clock_mode	= VDSO_CLOCKMODE_ARCHTIMER,
87 };
88 
89 #define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
90 u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
91 EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
92 
93 static int decrementer_set_next_event(unsigned long evt,
94 				      struct clock_event_device *dev);
95 static int decrementer_shutdown(struct clock_event_device *evt);
96 
97 struct clock_event_device decrementer_clockevent = {
98 	.name			= "decrementer",
99 	.rating			= 200,
100 	.irq			= 0,
101 	.set_next_event		= decrementer_set_next_event,
102 	.set_state_oneshot_stopped = decrementer_shutdown,
103 	.set_state_shutdown	= decrementer_shutdown,
104 	.tick_resume		= decrementer_shutdown,
105 	.features		= CLOCK_EVT_FEAT_ONESHOT |
106 				  CLOCK_EVT_FEAT_C3STOP,
107 };
108 EXPORT_SYMBOL(decrementer_clockevent);
109 
110 DEFINE_PER_CPU(u64, decrementers_next_tb);
111 EXPORT_SYMBOL_GPL(decrementers_next_tb);
112 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
113 
114 #define XSEC_PER_SEC (1024*1024)
115 
116 #ifdef CONFIG_PPC64
117 #define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
118 #else
119 /* compute ((xsec << 12) * max) >> 32 */
120 #define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
121 #endif
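/*
 * Worked example: with xsec = XSEC_PER_SEC / 2 (i.e. half a second) and
 * max = 1000, both variants of SCALE_XSEC return 500:
 *   64-bit: (524288 * 1000) / 1048576 = 500
 *   32-bit: mulhwu(524288 << 12, 1000) = ((u64)0x80000000 * 1000) >> 32 = 500
 */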
122 
123 unsigned long tb_ticks_per_jiffy;
124 unsigned long tb_ticks_per_usec = 100; /* sane default */
125 EXPORT_SYMBOL(tb_ticks_per_usec);
126 unsigned long tb_ticks_per_sec;
127 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
128 
129 DEFINE_SPINLOCK(rtc_lock);
130 EXPORT_SYMBOL_GPL(rtc_lock);
131 
132 static u64 tb_to_ns_scale __read_mostly;
133 static unsigned tb_to_ns_shift __read_mostly;
134 static u64 boot_tb __read_mostly;
135 
136 extern struct timezone sys_tz;
137 static long timezone_offset;
138 
139 unsigned long ppc_proc_freq;
140 EXPORT_SYMBOL_GPL(ppc_proc_freq);
141 unsigned long ppc_tb_freq;
142 EXPORT_SYMBOL_GPL(ppc_tb_freq);
143 
144 bool tb_invalid;
145 
146 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
147 /*
148  * Factor for converting from cputime_t (timebase ticks) to
149  * microseconds. This is stored as 0.64 fixed-point binary fraction.
150  */
151 u64 __cputime_usec_factor;
152 EXPORT_SYMBOL(__cputime_usec_factor);
153 
154 #ifdef CONFIG_PPC_SPLPAR
155 void (*dtl_consumer)(struct dtl_entry *, u64);
156 #endif
157 
158 static void calc_cputime_factors(void)
159 {
160 	struct div_result res;
161 
162 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
163 	__cputime_usec_factor = res.result_low;
164 }
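/*
 * Worked example: with tb_ticks_per_sec = 512000000 (a 512 MHz timebase),
 * div128_by_32() computes 1000000 * 2^64 / 512000000, so
 * __cputime_usec_factor = 2^64 / 512 = 2^55, the 0.64 fixed-point encoding
 * of 1/512 microseconds per tick.  Converting 512 ticks by a multiply-high,
 * (512 * 2^55) >> 64, then gives 1 microsecond, as expected.
 */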
165 
166 /*
167  * Read the SPURR on systems that have it, otherwise the PURR,
168  * or if that doesn't exist return the timebase value passed in.
169  */
170 static inline unsigned long read_spurr(unsigned long tb)
171 {
172 	if (cpu_has_feature(CPU_FTR_SPURR))
173 		return mfspr(SPRN_SPURR);
174 	if (cpu_has_feature(CPU_FTR_PURR))
175 		return mfspr(SPRN_PURR);
176 	return tb;
177 }
178 
179 #ifdef CONFIG_PPC_SPLPAR
180 
181 #include <asm/dtl.h>
182 
183 /*
184  * Scan the dispatch trace log and count up the stolen time.
185  * Should be called with interrupts disabled.
186  */
187 static u64 scan_dispatch_log(u64 stop_tb)
188 {
189 	u64 i = local_paca->dtl_ridx;
190 	struct dtl_entry *dtl = local_paca->dtl_curr;
191 	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
192 	struct lppaca *vpa = local_paca->lppaca_ptr;
193 	u64 tb_delta;
194 	u64 stolen = 0;
195 	u64 dtb;
196 
197 	if (!dtl)
198 		return 0;
199 
200 	if (i == be64_to_cpu(vpa->dtl_idx))
201 		return 0;
202 	while (i < be64_to_cpu(vpa->dtl_idx)) {
203 		dtb = be64_to_cpu(dtl->timebase);
204 		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
205 			be32_to_cpu(dtl->ready_to_enqueue_time);
206 		barrier();
207 		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
208 			/* buffer has overflowed */
209 			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
210 			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
211 			continue;
212 		}
213 		if (dtb > stop_tb)
214 			break;
215 		if (dtl_consumer)
216 			dtl_consumer(dtl, i);
217 		stolen += tb_delta;
218 		++i;
219 		++dtl;
220 		if (dtl == dtl_end)
221 			dtl = local_paca->dispatch_log;
222 	}
223 	local_paca->dtl_ridx = i;
224 	local_paca->dtl_curr = dtl;
225 	return stolen;
226 }
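/*
 * A minimal sketch of a dtl_consumer hook, using only the dtl_entry fields
 * already read above; the name and the pr_debug() output are illustrative,
 * not the real pseries consumer.
 */
#if 0	/* illustrative sketch only */
static void example_dtl_consumer(struct dtl_entry *dte, u64 index)
{
	pr_debug("dtl %llu: tb=%llu stolen=%u\n", index,
		 be64_to_cpu(dte->timebase),
		 be32_to_cpu(dte->enqueue_to_dispatch_time) +
		 be32_to_cpu(dte->ready_to_enqueue_time));
}
#endif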
227 
228 /*
229  * Accumulate stolen time by scanning the dispatch trace log.
230  * Called on entry from user mode.
231  */
232 void notrace accumulate_stolen_time(void)
233 {
234 	u64 sst, ust;
235 	struct cpu_accounting_data *acct = &local_paca->accounting;
236 
237 	sst = scan_dispatch_log(acct->starttime_user);
238 	ust = scan_dispatch_log(acct->starttime);
239 	acct->stime -= sst;
240 	acct->utime -= ust;
241 	acct->steal_time += ust + sst;
242 }
243 
244 static inline u64 calculate_stolen_time(u64 stop_tb)
245 {
246 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
247 		return 0;
248 
249 	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
250 		return scan_dispatch_log(stop_tb);
251 
252 	return 0;
253 }
254 
255 #else /* CONFIG_PPC_SPLPAR */
256 static inline u64 calculate_stolen_time(u64 stop_tb)
257 {
258 	return 0;
259 }
260 
261 #endif /* CONFIG_PPC_SPLPAR */
262 
263 /*
264  * Account time for a transition between system, hard irq
265  * or soft irq state.
266  */
267 static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
268 					unsigned long now, unsigned long stime)
269 {
270 	unsigned long stime_scaled = 0;
271 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
272 	unsigned long nowscaled, deltascaled;
273 	unsigned long utime, utime_scaled;
274 
275 	nowscaled = read_spurr(now);
276 	deltascaled = nowscaled - acct->startspurr;
277 	acct->startspurr = nowscaled;
278 	utime = acct->utime - acct->utime_sspurr;
279 	acct->utime_sspurr = acct->utime;
280 
281 	/*
282 	 * Because we don't read the SPURR on every kernel entry/exit,
283 	 * deltascaled includes both user and system SPURR ticks.
284 	 * Apportion these ticks to system SPURR ticks and user
285 	 * SPURR ticks in the same ratio as the system time (delta)
286 	 * and user time (udelta) values obtained from the timebase
287 	 * over the same interval.  The system ticks get accounted here;
288 	 * the user ticks get saved up in paca->user_time_scaled to be
289 	 * used by account_process_tick.
290 	 */
291 	stime_scaled = stime;
292 	utime_scaled = utime;
293 	if (deltascaled != stime + utime) {
294 		if (utime) {
295 			stime_scaled = deltascaled * stime / (stime + utime);
296 			utime_scaled = deltascaled - stime_scaled;
297 		} else {
298 			stime_scaled = deltascaled;
299 		}
300 	}
301 	acct->utime_scaled += utime_scaled;
302 #endif
303 
304 	return stime_scaled;
305 }
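/*
 * Worked example of the apportioning above: if the timebase measured
 * stime = 100 and utime = 300 ticks but only deltascaled = 200 SPURR
 * ticks elapsed (the thread ran at half speed), then
 * stime_scaled = 200 * 100 / 400 = 50 and utime_scaled = 200 - 50 = 150,
 * preserving the 1:3 system:user ratio.
 */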
306 
307 static unsigned long vtime_delta(struct cpu_accounting_data *acct,
308 				 unsigned long *stime_scaled,
309 				 unsigned long *steal_time)
310 {
311 	unsigned long now, stime;
312 
313 	WARN_ON_ONCE(!irqs_disabled());
314 
315 	now = mftb();
316 	stime = now - acct->starttime;
317 	acct->starttime = now;
318 
319 	*stime_scaled = vtime_delta_scaled(acct, now, stime);
320 
321 	*steal_time = calculate_stolen_time(now);
322 
323 	return stime;
324 }
325 
326 static void vtime_delta_kernel(struct cpu_accounting_data *acct,
327 			       unsigned long *stime, unsigned long *stime_scaled)
328 {
329 	unsigned long steal_time;
330 
331 	*stime = vtime_delta(acct, stime_scaled, &steal_time);
332 	*stime -= min(*stime, steal_time);
333 	acct->steal_time += steal_time;
334 }
335 
336 void vtime_account_kernel(struct task_struct *tsk)
337 {
338 	struct cpu_accounting_data *acct = get_accounting(tsk);
339 	unsigned long stime, stime_scaled;
340 
341 	vtime_delta_kernel(acct, &stime, &stime_scaled);
342 
343 	if (tsk->flags & PF_VCPU) {
344 		acct->gtime += stime;
345 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
346 		acct->utime_scaled += stime_scaled;
347 #endif
348 	} else {
349 		acct->stime += stime;
350 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
351 		acct->stime_scaled += stime_scaled;
352 #endif
353 	}
354 }
355 EXPORT_SYMBOL_GPL(vtime_account_kernel);
356 
357 void vtime_account_idle(struct task_struct *tsk)
358 {
359 	unsigned long stime, stime_scaled, steal_time;
360 	struct cpu_accounting_data *acct = get_accounting(tsk);
361 
362 	stime = vtime_delta(acct, &stime_scaled, &steal_time);
363 	acct->idle_time += stime + steal_time;
364 }
365 
366 static void vtime_account_irq_field(struct cpu_accounting_data *acct,
367 				    unsigned long *field)
368 {
369 	unsigned long stime, stime_scaled;
370 
371 	vtime_delta_kernel(acct, &stime, &stime_scaled);
372 	*field += stime;
373 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
374 	acct->stime_scaled += stime_scaled;
375 #endif
376 }
377 
378 void vtime_account_softirq(struct task_struct *tsk)
379 {
380 	struct cpu_accounting_data *acct = get_accounting(tsk);
381 	vtime_account_irq_field(acct, &acct->softirq_time);
382 }
383 
384 void vtime_account_hardirq(struct task_struct *tsk)
385 {
386 	struct cpu_accounting_data *acct = get_accounting(tsk);
387 	vtime_account_irq_field(acct, &acct->hardirq_time);
388 }
389 
390 static void vtime_flush_scaled(struct task_struct *tsk,
391 			       struct cpu_accounting_data *acct)
392 {
393 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
394 	if (acct->utime_scaled)
395 		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
396 	if (acct->stime_scaled)
397 		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
398 
399 	acct->utime_scaled = 0;
400 	acct->utime_sspurr = 0;
401 	acct->stime_scaled = 0;
402 #endif
403 }
404 
405 /*
406  * Account the whole cputime accumulated in the paca
407  * Must be called with interrupts disabled.
408  * Assumes that vtime_account_kernel/idle() has been called
409  * recently (i.e. since the last entry from usermode) so that
410  * get_paca()->user_time_scaled is up to date.
411  */
412 void vtime_flush(struct task_struct *tsk)
413 {
414 	struct cpu_accounting_data *acct = get_accounting(tsk);
415 
416 	if (acct->utime)
417 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
418 
419 	if (acct->gtime)
420 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
421 
422 	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
423 		account_steal_time(cputime_to_nsecs(acct->steal_time));
424 		acct->steal_time = 0;
425 	}
426 
427 	if (acct->idle_time)
428 		account_idle_time(cputime_to_nsecs(acct->idle_time));
429 
430 	if (acct->stime)
431 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
432 					  CPUTIME_SYSTEM);
433 
434 	if (acct->hardirq_time)
435 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
436 					  CPUTIME_IRQ);
437 	if (acct->softirq_time)
438 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
439 					  CPUTIME_SOFTIRQ);
440 
441 	vtime_flush_scaled(tsk, acct);
442 
443 	acct->utime = 0;
444 	acct->gtime = 0;
445 	acct->idle_time = 0;
446 	acct->stime = 0;
447 	acct->hardirq_time = 0;
448 	acct->softirq_time = 0;
449 }
450 
451 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
452 #define calc_cputime_factors()
453 #endif
454 
455 void __delay(unsigned long loops)
456 {
457 	unsigned long start;
458 
459 	spin_begin();
460 	if (tb_invalid) {
461 		/*
462 		 * TB is in error state and isn't ticking anymore.
463 		 * HMI handler was unable to recover from TB error.
464 		 * Return immediately, so that kernel won't get stuck here.
465 		 */
466 		spin_cpu_relax();
467 	} else {
468 		start = mftb();
469 		while (mftb() - start < loops)
470 			spin_cpu_relax();
471 	}
472 	spin_end();
473 }
474 EXPORT_SYMBOL(__delay);
475 
476 void udelay(unsigned long usecs)
477 {
478 	__delay(tb_ticks_per_usec * usecs);
479 }
480 EXPORT_SYMBOL(udelay);
481 
482 #ifdef CONFIG_SMP
483 unsigned long profile_pc(struct pt_regs *regs)
484 {
485 	unsigned long pc = instruction_pointer(regs);
486 
487 	if (in_lock_functions(pc))
488 		return regs->link;
489 
490 	return pc;
491 }
492 EXPORT_SYMBOL(profile_pc);
493 #endif
494 
495 #ifdef CONFIG_IRQ_WORK
496 
497 /*
 498  * 64-bit uses a byte in the PACA; 32-bit uses a per-cpu variable...
499  */
500 #ifdef CONFIG_PPC64
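/*
 * On 64-bit, r13 (GPR 13) always holds the address of this CPU's
 * paca_struct, so the byte stores below update paca->irq_work_pending
 * directly, without the per-cpu accessors.
 */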
501 static inline void set_irq_work_pending_flag(void)
502 {
503 	asm volatile("stb %0,%1(13)" : :
504 		"r" (1),
505 		"i" (offsetof(struct paca_struct, irq_work_pending)));
506 }
507 
508 static inline void clear_irq_work_pending(void)
509 {
510 	asm volatile("stb %0,%1(13)" : :
511 		"r" (0),
512 		"i" (offsetof(struct paca_struct, irq_work_pending)));
513 }
514 
515 #else /* 32-bit */
516 
517 DEFINE_PER_CPU(u8, irq_work_pending);
518 
519 #define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
520 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
521 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
522 
523 #endif /* 32 vs 64 bit */
524 
525 void arch_irq_work_raise(void)
526 {
527 	/*
528 	 * 64-bit code that uses irq soft-mask can just cause an immediate
529 	 * interrupt here that gets soft masked, if this is called under
530 	 * local_irq_disable(). It might be possible to prevent that happening
531 	 * by noticing interrupts are disabled and setting decrementer pending
532 	 * to be replayed when irqs are enabled. The problem there is that
533 	 * tracing can call irq_work_raise, including in code that does low
534 	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
535 	 * which could get tangled up if we're messing with the same state
536 	 * here.
537 	 */
538 	preempt_disable();
539 	set_irq_work_pending_flag();
540 	set_dec(1);
541 	preempt_enable();
542 }
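/*
 * set_dec(1) makes the decrementer fire almost immediately; the resulting
 * timer_interrupt() sees test_irq_work_pending() set and calls
 * irq_work_run() to process the queued work.
 */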
543 
544 #else  /* CONFIG_IRQ_WORK */
545 
546 #define test_irq_work_pending()	0
547 #define clear_irq_work_pending()
548 
549 #endif /* CONFIG_IRQ_WORK */
550 
551 /*
552  * timer_interrupt - gets called when the decrementer overflows,
553  * with interrupts disabled.
554  */
555 DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
556 {
557 	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
558 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
559 	struct pt_regs *old_regs;
560 	u64 now;
561 
562 	/*
563 	 * Some implementations of hotplug will get timer interrupts while
 564 	 * offline; just ignore these.
565 	 */
566 	if (unlikely(!cpu_online(smp_processor_id()))) {
567 		set_dec(decrementer_max);
568 		return;
569 	}
570 
571 	/* Ensure a positive value is written to the decrementer, or else
572 	 * some CPUs will continue to take decrementer exceptions. When the
573 	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
574 	 * 31 bits, which is about 4 seconds on most systems, which gives
575 	 * the watchdog a chance of catching timer interrupt hard lockups.
576 	 */
577 	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
578 		set_dec(0x7fffffff);
579 	else
580 		set_dec(decrementer_max);
581 
582 	/* Conditionally hard-enable interrupts now that the DEC has been
583 	 * bumped to its maximum value
584 	 */
585 	may_hard_irq_enable();
586 
587 
588 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
589 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
590 		__do_IRQ(regs);
591 #endif
592 
593 	old_regs = set_irq_regs(regs);
594 
595 	trace_timer_interrupt_entry(regs);
596 
597 	if (test_irq_work_pending()) {
598 		clear_irq_work_pending();
599 		irq_work_run();
600 	}
601 
602 	now = get_tb();
603 	if (now >= *next_tb) {
604 		*next_tb = ~(u64)0;
605 		if (evt->event_handler)
606 			evt->event_handler(evt);
607 		__this_cpu_inc(irq_stat.timer_irqs_event);
608 	} else {
609 		now = *next_tb - now;
610 		if (now <= decrementer_max)
611 			set_dec(now);
612 		/* We may have raced with new irq work */
613 		if (test_irq_work_pending())
614 			set_dec(1);
615 		__this_cpu_inc(irq_stat.timer_irqs_others);
616 	}
617 
618 	trace_timer_interrupt_exit(regs);
619 
620 	set_irq_regs(old_regs);
621 }
622 EXPORT_SYMBOL(timer_interrupt);
623 
624 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
625 void timer_broadcast_interrupt(void)
626 {
627 	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
628 
629 	*next_tb = ~(u64)0;
630 	tick_receive_broadcast();
631 	__this_cpu_inc(irq_stat.broadcast_irqs_event);
632 }
633 #endif
634 
635 #ifdef CONFIG_SUSPEND
636 /* Overrides the weak version in kernel/power/main.c */
637 void arch_suspend_disable_irqs(void)
638 {
639 	if (ppc_md.suspend_disable_irqs)
640 		ppc_md.suspend_disable_irqs();
641 
642 	/* Disable the decrementer, so that it doesn't interfere
643 	 * with suspending.
644 	 */
645 
646 	set_dec(decrementer_max);
647 	local_irq_disable();
648 	set_dec(decrementer_max);
649 }
650 
651 /* Overrides the weak version in kernel/power/main.c */
652 void arch_suspend_enable_irqs(void)
653 {
654 	local_irq_enable();
655 
656 	if (ppc_md.suspend_enable_irqs)
657 		ppc_md.suspend_enable_irqs();
658 }
659 #endif
660 
661 unsigned long long tb_to_ns(unsigned long long ticks)
662 {
663 	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
664 }
665 EXPORT_SYMBOL_GPL(tb_to_ns);
666 
667 /*
668  * Scheduler clock - returns current time in nanosec units.
669  *
670  * Note: mulhdu(a, b) (multiply high double unsigned) returns
671  * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
672  * are 64-bit unsigned numbers.
673  */
674 notrace unsigned long long sched_clock(void)
675 {
676 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
677 }
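/*
 * Worked example: for a 512 MHz timebase, time_init() computes
 * 10^9 * 2^64 / 512000000 = 1.953125 * 2^64.  One pass of its
 * normalisation loop leaves tb_to_ns_scale = 0.9765625 * 2^64 and
 * tb_to_ns_shift = 1, so mulhdu(ticks, tb_to_ns_scale) << 1 yields
 * ticks * 1.953125, the expected nanoseconds per tick.
 */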
678 
679 
680 #ifdef CONFIG_PPC_PSERIES
681 
682 /*
 683  * Running clock - attempts to give a view of time passing for virtualised
 684  * kernels.
 685  * Uses the VTB register if available, otherwise a next-best guess.
686  */
687 unsigned long long running_clock(void)
688 {
689 	/*
 690 	 * Don't read the VTB as a host, since KVM does not switch the host
 691 	 * timebase into the VTB when it takes a guest off the CPU; reading the
 692 	 * VTB would return the 'last switched out' guest VTB.
 693 	 *
 694 	 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled, so it
 695 	 * would be unsafe to rely only on the #ifdef above.
696 	 */
697 	if (firmware_has_feature(FW_FEATURE_LPAR) &&
698 	    cpu_has_feature(CPU_FTR_ARCH_207S))
699 		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
700 
701 	/*
 702 	 * This is the next-best approximation without a VTB.
 703 	 * On a host running bare metal there should never be any stolen
 704 	 * time, and on a host which doesn't do any virtualisation the TB *should*
 705 	 * equal the VTB, so it makes no difference anyway.
706 	 */
707 	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
708 }
709 #endif
710 
711 static int __init get_freq(char *name, int cells, unsigned long *val)
712 {
713 	struct device_node *cpu;
714 	const __be32 *fp;
715 	int found = 0;
716 
717 	/* The cpu node should have timebase and clock frequency properties */
718 	cpu = of_find_node_by_type(NULL, "cpu");
719 
720 	if (cpu) {
721 		fp = of_get_property(cpu, name, NULL);
722 		if (fp) {
723 			found = 1;
724 			*val = of_read_ulong(fp, cells);
725 		}
726 
727 		of_node_put(cpu);
728 	}
729 
730 	return found;
731 }
732 
733 static void start_cpu_decrementer(void)
734 {
735 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
736 	unsigned int tcr;
737 
738 	/* Clear any pending timer interrupts */
739 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
740 
741 	tcr = mfspr(SPRN_TCR);
742 	/*
743 	 * The watchdog may have already been enabled by u-boot. So leave
744 	 * TRC[WP] (Watchdog Period) alone.
745 	 */
746 	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
747 	tcr |= TCR_DIE;		/* Enable decrementer */
748 	mtspr(SPRN_TCR, tcr);
749 #endif
750 }
751 
752 void __init generic_calibrate_decr(void)
753 {
754 	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
755 
756 	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
757 	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
758 
759 		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
760 				"(not found)\n");
761 	}
762 
763 	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
764 
765 	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
766 	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
767 
768 		printk(KERN_ERR "WARNING: Estimating processor frequency "
769 				"(not found)\n");
770 	}
771 }
772 
773 int update_persistent_clock64(struct timespec64 now)
774 {
775 	struct rtc_time tm;
776 
777 	if (!ppc_md.set_rtc_time)
778 		return -ENODEV;
779 
780 	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
781 
782 	return ppc_md.set_rtc_time(&tm);
783 }
784 
785 static void __read_persistent_clock(struct timespec64 *ts)
786 {
787 	struct rtc_time tm;
788 	static int first = 1;
789 
790 	ts->tv_nsec = 0;
 791 	/* XXX this is a little fragile but will work okay in the short term */
792 	if (first) {
793 		first = 0;
794 		if (ppc_md.time_init)
795 			timezone_offset = ppc_md.time_init();
796 
797 		/* get_boot_time() isn't guaranteed to be safe to call late */
798 		if (ppc_md.get_boot_time) {
799 			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
800 			return;
801 		}
802 	}
803 	if (!ppc_md.get_rtc_time) {
804 		ts->tv_sec = 0;
805 		return;
806 	}
807 	ppc_md.get_rtc_time(&tm);
808 
809 	ts->tv_sec = rtc_tm_to_time64(&tm);
810 }
811 
812 void read_persistent_clock64(struct timespec64 *ts)
813 {
814 	__read_persistent_clock(ts);
815 
 816 	/* Sanitize it in case the real time clock was set before the epoch */
817 	if (ts->tv_sec < 0) {
818 		ts->tv_sec = 0;
819 		ts->tv_nsec = 0;
820 	}
821 
822 }
823 
824 /* clocksource code */
825 static notrace u64 timebase_read(struct clocksource *cs)
826 {
827 	return (u64)get_tb();
828 }
829 
830 static void __init clocksource_init(void)
831 {
832 	struct clocksource *clock = &clocksource_timebase;
833 
834 	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
835 		printk(KERN_ERR "clocksource: %s is already registered\n",
836 		       clock->name);
837 		return;
838 	}
839 
840 	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
841 	       clock->name, clock->mult, clock->shift);
842 }
843 
844 static int decrementer_set_next_event(unsigned long evt,
845 				      struct clock_event_device *dev)
846 {
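	/*
	 * evt is in timebase ticks here: the device was registered with
	 * clockevents_config_and_register(..., ppc_tb_freq, ...), so the
	 * clockevents core converts nanoseconds to ticks before calling us.
	 */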
847 	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
848 	set_dec(evt);
849 
850 	/* We may have raced with new irq work */
851 	if (test_irq_work_pending())
852 		set_dec(1);
853 
854 	return 0;
855 }
856 
857 static int decrementer_shutdown(struct clock_event_device *dev)
858 {
859 	decrementer_set_next_event(decrementer_max, dev);
860 	return 0;
861 }
862 
863 static void register_decrementer_clockevent(int cpu)
864 {
865 	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
866 
867 	*dec = decrementer_clockevent;
868 	dec->cpumask = cpumask_of(cpu);
869 
870 	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
871 
872 	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
873 		    dec->name, dec->mult, dec->shift, cpu);
874 
875 	/* Set values for KVM, see kvm_emulate_dec() */
876 	decrementer_clockevent.mult = dec->mult;
877 	decrementer_clockevent.shift = dec->shift;
878 }
879 
880 static void enable_large_decrementer(void)
881 {
882 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
883 		return;
884 
885 	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
886 		return;
887 
888 	/*
 889 	 * If we're running as the hypervisor we need to enable the LD manually,
 890 	 * otherwise firmware should have done it for us.
891 	 */
892 	if (cpu_has_feature(CPU_FTR_HVMODE))
893 		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
894 }
895 
896 static void __init set_decrementer_max(void)
897 {
898 	struct device_node *cpu;
899 	u32 bits = 32;
900 
 901 	/* Prior to ISAv3 the decrementer is always 32-bit */
902 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
903 		return;
904 
905 	cpu = of_find_node_by_type(NULL, "cpu");
906 
907 	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
908 		if (bits > 64 || bits < 32) {
 909 			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
910 			bits = 32;
911 		}
912 
913 		/* calculate the signed maximum given this many bits */
914 		decrementer_max = (1ul << (bits - 1)) - 1;
915 	}
916 
917 	of_node_put(cpu);
918 
919 	pr_info("time_init: %u bit decrementer (max: %llx)\n",
920 		bits, decrementer_max);
921 }
922 
923 static void __init init_decrementer_clockevent(void)
924 {
925 	register_decrementer_clockevent(smp_processor_id());
926 }
927 
928 void secondary_cpu_time_init(void)
929 {
930 	/* Enable and test the large decrementer for this cpu */
931 	enable_large_decrementer();
932 
933 	/* Start the decrementer on CPUs that have manual control
934 	 * such as BookE
935 	 */
936 	start_cpu_decrementer();
937 
 938 	/* FIXME: Should make an unrelated change to move the snapshot_timebase
 939 	 * call here! */
940 	register_decrementer_clockevent(smp_processor_id());
941 }
942 
943 /* This function is only called on the boot processor */
944 void __init time_init(void)
945 {
946 	struct div_result res;
947 	u64 scale;
948 	unsigned shift;
949 
950 	/* Normal PowerPC with timebase register */
951 	ppc_md.calibrate_decr();
952 	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
953 	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
954 	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
955 	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
956 
957 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
958 	tb_ticks_per_sec = ppc_tb_freq;
959 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
960 	calc_cputime_factors();
961 
962 	/*
963 	 * Compute scale factor for sched_clock.
964 	 * The calibrate_decr() function has set tb_ticks_per_sec,
965 	 * which is the timebase frequency.
966 	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
967 	 * the 128-bit result as a 64.64 fixed-point number.
968 	 * We then shift that number right until it is less than 1.0,
969 	 * giving us the scale factor and shift count to use in
970 	 * sched_clock().
971 	 */
972 	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
973 	scale = res.result_low;
974 	for (shift = 0; res.result_high != 0; ++shift) {
975 		scale = (scale >> 1) | (res.result_high << 63);
976 		res.result_high >>= 1;
977 	}
978 	tb_to_ns_scale = scale;
979 	tb_to_ns_shift = shift;
980 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
981 	boot_tb = get_tb();
982 
983 	/* If platform provided a timezone (pmac), we correct the time */
984 	if (timezone_offset) {
985 		sys_tz.tz_minuteswest = -timezone_offset / 60;
986 		sys_tz.tz_dsttime = 0;
987 	}
988 
989 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
990 
991 	/* initialise and enable the large decrementer (if we have one) */
992 	set_decrementer_max();
993 	enable_large_decrementer();
994 
995 	/* Start the decrementer on CPUs that have manual control
996 	 * such as BookE
997 	 */
998 	start_cpu_decrementer();
999 
1000 	/* Register the clocksource */
1001 	clocksource_init();
1002 
1003 	init_decrementer_clockevent();
1004 	tick_setup_hrtimer_broadcast();
1005 
1006 	of_clk_init(NULL);
1007 	enable_sched_clock_irqtime();
1008 }
1009 
1010 /*
 1011  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
1012  * result.
1013  */
1014 void div128_by_32(u64 dividend_high, u64 dividend_low,
1015 		  unsigned divisor, struct div_result *dr)
1016 {
1017 	unsigned long a, b, c, d;
1018 	unsigned long w, x, y, z;
1019 	u64 ra, rb, rc;
1020 
1021 	a = dividend_high >> 32;
1022 	b = dividend_high & 0xffffffff;
1023 	c = dividend_low >> 32;
1024 	d = dividend_low & 0xffffffff;
1025 
1026 	w = a / divisor;
1027 	ra = ((u64)(a - (w * divisor)) << 32) + b;
1028 
1029 	rb = ((u64) do_div(ra, divisor) << 32) + c;
1030 	x = ra;
1031 
1032 	rc = ((u64) do_div(rb, divisor) << 32) + d;
1033 	y = rb;
1034 
1035 	do_div(rc, divisor);
1036 	z = rc;
1037 
1038 	dr->result_high = ((u64)w << 32) + x;
1039 	dr->result_low  = ((u64)y << 32) + z;
1040 
1041 }
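/*
 * Worked example: dividing 2^64 (dividend_high = 1, dividend_low = 0) by 3
 * walks the four 32-bit limbs as a = 0, b = 1, c = 0, d = 0 and produces
 * result_high = 0, result_low = 0x5555555555555555, i.e. floor(2^64 / 3),
 * with the final remainder of 1 discarded.
 */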
1042 
 1043 /* We don't need to calibrate delay; we use the CPU timebase for that */
1044 void calibrate_delay(void)
1045 {
 1046 	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
 1047 	 * as the number of __delay(1) calls in a jiffy, so make it so.
1048 	 */
1049 	loops_per_jiffy = tb_ticks_per_jiffy;
1050 }
1051 
1052 #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
1053 static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
1054 {
1055 	ppc_md.get_rtc_time(tm);
1056 	return 0;
1057 }
1058 
1059 static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
1060 {
1061 	if (!ppc_md.set_rtc_time)
1062 		return -EOPNOTSUPP;
1063 
1064 	if (ppc_md.set_rtc_time(tm) < 0)
1065 		return -EOPNOTSUPP;
1066 
1067 	return 0;
1068 }
1069 
1070 static const struct rtc_class_ops rtc_generic_ops = {
1071 	.read_time = rtc_generic_get_time,
1072 	.set_time = rtc_generic_set_time,
1073 };
1074 
1075 static int __init rtc_init(void)
1076 {
1077 	struct platform_device *pdev;
1078 
1079 	if (!ppc_md.get_rtc_time)
1080 		return -ENODEV;
1081 
1082 	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1083 					     &rtc_generic_ops,
1084 					     sizeof(rtc_generic_ops));
1085 
1086 	return PTR_ERR_OR_ZERO(pdev);
1087 }
1088 
1089 device_initcall(rtc_init);
1090 #endif
1091