xref: /linux/arch/parisc/kernel/time.c (revision 3ea5eb68b9d624935108b5e696859304edfac202)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common time service routines for parisc machines.
 * based on arch/loongarch/kernel/time.c
 *
 * Copyright (C) 2024 Helge Deller <deller@gmx.de>
 */
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/processor.h>

static u64 cr16_clock_freq;
static unsigned long clocktick;

int time_keeper_id;	/* CPU used for timekeeping */

static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device);

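/*
 * Placeholder event handler: the clockevent core installs the real tick
 * handler once the device is registered, so this only covers a stray
 * timer interrupt arriving before that point.
 */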
static void parisc_event_handler(struct clock_event_device *dev)
{
}

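/*
 * Arm the next timer interrupt.  On PA-RISC, reading control register 16
 * returns the free-running interval timer, while writing CR16 sets the
 * comparison value that raises TIMER_IRQ once the counter reaches it.
 */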
static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned long new_cr16;

	new_cr16 = mfctl(16) + delta;
	mtctl(new_cr16, 16);

	return 0;
}

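/*
 * Per-CPU timer interrupt.  In periodic mode the comparator only fires
 * once per programmed value, so it has to be re-armed by hand here.
 */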
irqreturn_t timer_interrupt(int irq, void *data)
{
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	cd = &per_cpu(parisc_clockevent_device, cpu);

	if (clockevent_state_periodic(cd))
		parisc_timer_next_event(clocktick, cd);

	if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd))
		cd->event_handler(cd);

	return IRQ_HANDLED;
}

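/*
 * State callbacks: entering oneshot or periodic mode simply arms the
 * comparator one clocktick ahead; shutdown needs no hardware action
 * since the interval timer is free-running.
 */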
static int parisc_set_state_oneshot(struct clock_event_device *evt)
{
	parisc_timer_next_event(clocktick, evt);

	return 0;
}

static int parisc_set_state_periodic(struct clock_event_device *evt)
{
	parisc_timer_next_event(clocktick, evt);

	return 0;
}

static int parisc_set_state_shutdown(struct clock_event_device *evt)
{
	return 0;
}

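/*
 * Register the per-CPU CR16 comparator as a clockevent device.  min_delta
 * is a rough safety margin (hence the XXX); max_delta is limited to half
 * the counter width.
 */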
void parisc_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long min_delta = 0x600;	/* XXX */
	unsigned long max_delta = (1UL << (BITS_PER_LONG - 1));
	struct clock_event_device *cd;

	cd = &per_cpu(parisc_clockevent_device, cpu);

	cd->name = "cr16_clockevent";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_PERCPU;

	cd->irq = TIMER_IRQ;
	cd->rating = 320;
	cd->cpumask = cpumask_of(cpu);
	cd->set_state_oneshot = parisc_set_state_oneshot;
	cd->set_state_oneshot_stopped = parisc_set_state_shutdown;
	cd->set_state_periodic = parisc_set_state_periodic;
	cd->set_state_shutdown = parisc_set_state_shutdown;
	cd->set_next_event = parisc_timer_next_event;
	cd->event_handler = parisc_event_handler;

	clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta);
}

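/*
 * Return the PC to attribute a profiling sample to.  When the PSW N
 * (nullify) bit is set, the instruction at the interrupted PC will be
 * skipped, so charge the sample to the previous instruction; inside
 * lock functions report the caller (return pointer in %r2) instead.
 */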
unsigned long notrace profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);

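/*
 * Generic RTC device backed by the firmware's PDC time-of-day calls.
 */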
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	struct pdc_tod tod_data;

	memset(tm, 0, sizeof(*tm));
	if (pdc_tod_read(&tod_data) < 0)
		return -EOPNOTSUPP;

	/* we treat tod_sec as unsigned, so this can work until year 2106 */
	rtc_time64_to_tm(tod_data.tod_sec, tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = rtc_tm_to_time64(tm);
	int ret;

	/* hppa has Y2K38 problem: pdc_tod_set() takes a u32 value! */
	ret = pdc_tod_set(secs, 0);
	if (ret != 0) {
		pr_warn("pdc_tod_set(%lld) returned error %d\n", secs, ret);
		if (ret == PDC_INVALID_ARG)
			return -EINVAL;
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif

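/*
 * Boot- and resume-time wall clock for the timekeeping core, read via
 * the firmware's PDC time-of-day call.
 */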
void read_persistent_clock64(struct timespec64 *ts)
{
	static struct pdc_tod tod_data;

	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

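/*
 * CR16 also serves as the clocksource and sched_clock: get_cycles()
 * expands to a read of the interval timer.
 */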
static u64 notrace read_cr16_sched_clock(void)
{
	return get_cycles();
}

static u64 notrace read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
					CLOCK_SOURCE_VALID_FOR_HRES |
					CLOCK_SOURCE_MUST_VERIFY |
					CLOCK_SOURCE_VERIFY_PERCPU,
};

/*
 * timer interrupt and sched_clock() initialization
 */

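/*
 * PDC firmware reports the number of CR16 ticks per 10 ms in
 * PAGE0->mem_10msec, so multiplying by 100 gives the counter frequency
 * in Hz.
 */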
void __init time_init(void)
{
	cr16_clock_freq = 100 * PAGE0->mem_10msec;  /* Hz */
	clocktick = cr16_clock_freq / HZ;

	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq);

	parisc_clockevent_init();

	/* register with the clocksource framework */
	clocksource_register_hz(&clocksource_cr16, cr16_clock_freq);
}
219