// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018,2021-2025 NXP
 *
 * NXP System Timer Module:
 *
 *  STM supports commonly required system and application software
 *  timing functions. STM includes a 32-bit count-up timer and four
 *  32-bit compare channels with a separate interrupt source for each
 *  channel. The timer is driven by the STM module clock divided by an
 *  8-bit prescale value (1 to 256). It has the ability to stop the
 *  timer in Debug mode.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/units.h>

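/*
 * Register layout: the module control and count registers sit at the
 * base of the block, followed by one 0x10-byte bank of channel
 * registers (CCR, CIR, CMP) per compare channel, starting at offset
 * 0x10.
 */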
#define STM_CR(__base)		(__base)

#define STM_CR_TEN		BIT(0)
#define STM_CR_FRZ		BIT(1)
#define STM_CR_CPS_OFFSET	8u
#define STM_CR_CPS_MASK		GENMASK(15, STM_CR_CPS_OFFSET)

#define STM_CNT(__base)		((__base) + 0x04)

#define STM_CCR0(__base)	((__base) + 0x10)
#define STM_CCR1(__base)	((__base) + 0x20)
#define STM_CCR2(__base)	((__base) + 0x30)
#define STM_CCR3(__base)	((__base) + 0x40)

#define STM_CCR_CEN		BIT(0)

#define STM_CIR0(__base)	((__base) + 0x14)
#define STM_CIR1(__base)	((__base) + 0x24)
#define STM_CIR2(__base)	((__base) + 0x34)
#define STM_CIR3(__base)	((__base) + 0x44)

#define STM_CIR_CIF		BIT(0)

#define STM_CMP0(__base)	((__base) + 0x18)
#define STM_CMP1(__base)	((__base) + 0x28)
#define STM_CMP2(__base)	((__base) + 0x38)
#define STM_CMP3(__base)	((__base) + 0x48)

#define STM_ENABLE_MASK	(STM_CR_FRZ | STM_CR_TEN)

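/*
 * struct stm_timer - per STM instance state
 * @base:    ioremapped module registers
 * @rate:    module clock rate in Hz
 * @delta:   last programmed delta, reused by the interrupt handler to re-arm
 * @counter: counter value saved across clocksource suspend/resume
 * @ced:     clockevent device backed by compare channel 0
 * @cs:      clocksource backed by the free-running counter
 * @refcnt:  module users (clocksource and clockevent) gating the enable bit
 */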
struct stm_timer {
	void __iomem *base;
	unsigned long rate;
	unsigned long delta;
	unsigned long counter;
	struct clock_event_device ced;
	struct clocksource cs;
	atomic_t refcnt;
};

static DEFINE_PER_CPU(struct stm_timer *, stm_timers);

static struct stm_timer *stm_sched_clock;

/*
 * Global count of the STM instances probed so far, used for the
 * multiple STMs initialization
 */
static int stm_instances;

/*
 * This global lock is used to prevent race conditions on stm_instances
 * in case the driver is using the async probe option
 */
static DEFINE_MUTEX(stm_instances_lock);

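/*
 * Scope-based locking helper: guard(stm_instances)(&stm_instances_lock)
 * takes the mutex and releases it automatically when the scope is left.
 */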
DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))

static struct stm_timer *cs_to_stm(struct clocksource *cs)
{
	return container_of(cs, struct stm_timer, cs);
}

static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
{
	return container_of(ced, struct stm_timer, ced);
}

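/*
 * sched_clock() read callback: return the raw value of the free-running
 * counter of the STM instance registered as the scheduler clock.
 */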
static u64 notrace nxp_stm_read_sched_clock(void)
{
	return readl(STM_CNT(stm_sched_clock->base));
}

static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
{
	return readl(STM_CNT(stm_timer->base));
}

static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
{
	writel(cnt, STM_CNT(stm_timer->base));
}

static u64 nxp_stm_clocksource_read(struct clocksource *cs)
{
	struct stm_timer *stm_timer = cs_to_stm(cs);

	return (u64)nxp_stm_clocksource_getcnt(stm_timer);
}

static void nxp_stm_module_enable(struct stm_timer *stm_timer)
{
	u32 reg;

	reg = readl(STM_CR(stm_timer->base));

	reg |= STM_ENABLE_MASK;

	writel(reg, STM_CR(stm_timer->base));
}

static void nxp_stm_module_disable(struct stm_timer *stm_timer)
{
	u32 reg;

	reg = readl(STM_CR(stm_timer->base));

	reg &= ~STM_ENABLE_MASK;

	writel(reg, STM_CR(stm_timer->base));
}

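/*
 * The clocksource and the clockevent share the same STM module. The
 * module enable bit is reference counted: the first user enables the
 * module, the last user leaving disables it.
 */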
static void nxp_stm_module_put(struct stm_timer *stm_timer)
{
	if (atomic_dec_and_test(&stm_timer->refcnt))
		nxp_stm_module_disable(stm_timer);
}

static void nxp_stm_module_get(struct stm_timer *stm_timer)
{
	if (atomic_inc_return(&stm_timer->refcnt) == 1)
		nxp_stm_module_enable(stm_timer);
}

static int nxp_stm_clocksource_enable(struct clocksource *cs)
{
	struct stm_timer *stm_timer = cs_to_stm(cs);

	nxp_stm_module_get(stm_timer);

	return 0;
}

static void nxp_stm_clocksource_disable(struct clocksource *cs)
{
	struct stm_timer *stm_timer = cs_to_stm(cs);

	nxp_stm_module_put(stm_timer);
}

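/*
 * The counter content may not survive suspend: save it when suspending
 * and write it back before re-enabling the module on resume, so the
 * clocksource continues counting from where it stopped.
 */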
static void nxp_stm_clocksource_suspend(struct clocksource *cs)
{
	struct stm_timer *stm_timer = cs_to_stm(cs);

	nxp_stm_clocksource_disable(cs);
	stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
}

static void nxp_stm_clocksource_resume(struct clocksource *cs)
{
	struct stm_timer *stm_timer = cs_to_stm(cs);

	nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
	nxp_stm_clocksource_enable(cs);
}

static void __init devm_clocksource_unregister(void *data)
{
	struct stm_timer *stm_timer = data;

	clocksource_unregister(&stm_timer->cs);
}

static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
					   const char *name, void __iomem *base, struct clk *clk)
{
	int ret;

	stm_timer->base = base;
	stm_timer->rate = clk_get_rate(clk);

	stm_timer->cs.name = name;
	stm_timer->cs.rating = 460;
	stm_timer->cs.read = nxp_stm_clocksource_read;
	stm_timer->cs.enable = nxp_stm_clocksource_enable;
	stm_timer->cs.disable = nxp_stm_clocksource_disable;
	stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
	stm_timer->cs.resume = nxp_stm_clocksource_resume;
	stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
	stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
	if (ret)
		return ret;

	/*
	 * devm_add_action_or_reset() already runs the release action on
	 * failure, so no explicit clocksource_unregister() is needed here.
	 */
	ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
	if (ret)
		return ret;

	stm_sched_clock = stm_timer;

	sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);

	dev_dbg(dev, "Registered clocksource %s\n", name);

	return 0;
}

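/*
 * The clockevent is implemented on top of compare channel 0 only: the
 * shared counter keeps free running while the channel compare and
 * interrupt are enabled and disabled on demand.
 */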
static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
{
	return readl(STM_CNT(stm_timer->base));
}

static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
{
	writel(0, STM_CCR0(stm_timer->base));
}

static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
{
	writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
}

static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
{
	struct stm_timer *stm_timer = ced_to_stm(ced);

	nxp_stm_clockevent_disable(stm_timer);

	return 0;
}

static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
{
	struct stm_timer *stm_timer = ced_to_stm(ced);
	u32 val;

	nxp_stm_clockevent_disable(stm_timer);

	stm_timer->delta = delta;

	val = nxp_stm_clockevent_read_counter(stm_timer) + delta;

	writel(val, STM_CMP0(stm_timer->base));

	/*
	 * The counter is shared across the channels and cannot be
	 * stopped while we are setting the next event. If the delta
	 * is very small it is possible the counter increases above
	 * the computed 'val'. The min_delta value specified when
	 * registering the clockevent will prevent that. The second
	 * case is if the counter wraps while we compute 'val' and
	 * before writing the comparator register. We read the counter
	 * again, check if we are back in time and abort the timer
	 * with -ETIME.
	 */
	if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
		return -ETIME;

	nxp_stm_clockevent_enable(stm_timer);

	return 0;
}

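/*
 * Periodic mode reuses the oneshot programming path: the next event is
 * programmed 'rate' ticks ahead, and the interrupt handler then keeps
 * re-arming channel 0 with the stored delta after each expiry.
 */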
static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
{
	struct stm_timer *stm_timer = ced_to_stm(ced);

	return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
}

static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
{
	struct stm_timer *stm_timer = ced_to_stm(ced);

	nxp_stm_module_put(stm_timer);
}

static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
{
	struct stm_timer *stm_timer = ced_to_stm(ced);

	nxp_stm_module_get(stm_timer);
}

static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
						  const char *name, void __iomem *base, int irq,
						  struct clk *clk, int cpu)
{
	stm_timer->base = base;
	stm_timer->rate = clk_get_rate(clk);

	stm_timer->ced.name = name;
	stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
	stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
	stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
	stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
	stm_timer->ced.resume = nxp_stm_clockevent_resume;
	stm_timer->ced.cpumask = cpumask_of(cpu);
	stm_timer->ced.rating = 460;
	stm_timer->ced.irq = irq;

	per_cpu(stm_timers, cpu) = stm_timer;

	nxp_stm_module_get(stm_timer);

	dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);

	return 0;
}

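/*
 * CPU hotplug callback: pin the STM interrupt to the CPU being brought
 * up and finish the clockevent registration for that CPU.
 */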
static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
{
	struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
	int ret;

	if (WARN_ON(!stm_timer))
		return -EFAULT;

	ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
	if (ret)
		return ret;

	/*
	 * Timing measurements show that reading the counter register
	 * and writing the comparator register takes at most 1100 ns
	 * with a 133 MHz module clock. The minimum delta must be set
	 * above this value; to be safe we use 2000 ns, i.e. 2 us.
	 *
	 * minimum ticks = (rate / MICRO) * 2
	 */
	clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
					(stm_timer->rate / MICRO) * 2, ULONG_MAX);

	return 0;
}

static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
{
	struct stm_timer *stm_timer = dev_id;
	struct clock_event_device *ced = &stm_timer->ced;
	u32 val;

	/*
	 * The interrupt is shared across the channels in the module,
	 * but this instance is configured to use only one channel, so
	 * there is no point in testing the interrupt flags first: we
	 * can directly clear the channel 0 interrupt flag register.
	 */
	writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));

	/*
	 * Update the STM_CMP value using the current counter value
	 */
	val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;

	writel(val, STM_CMP0(stm_timer->base));

	/*
	 * The STM hardware does not support oneshot natively: the
	 * channel keeps comparing and generating interrupts, so in
	 * ONESHOT mode software has to disable the channel here to
	 * stop the interrupt loop.
	 */
	if (likely(clockevent_state_oneshot(ced)))
		nxp_stm_clockevent_disable(stm_timer);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

static int __init nxp_stm_timer_probe(struct platform_device *pdev)
{
	struct stm_timer *stm_timer;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const char *name = of_node_full_name(np);
	struct clk *clk;
	void __iomem *base;
	int irq, ret;

	/*
	 * The device tree can describe multiple STM nodes, which makes
	 * this driver a good candidate for asynchronous probing. It is
	 * still unclear whether the time framework correctly handles
	 * parallel loading of the timers, but at least this driver is
	 * ready to support the option.
	 */
	guard(stm_instances)(&stm_instances_lock);

	/*
	 * The S32Gx are SoCs featuring a diverse set of cores. Linux
	 * is expected to run on Cortex-A53 cores, while other
	 * software stacks will operate on Cortex-M cores. The number
	 * of STM instances has been sized to include at most one
	 * instance per core.
	 *
	 * As we need a clocksource and a clockevent per CPU, we
	 * simply initialize a clocksource per CPU along with the
	 * clockevent, which makes the resulting code simpler.
	 *
	 * However, if the device tree describes more STM instances
	 * than the number of cores, the extra ones are ignored.
	 */
	if (stm_instances >= num_possible_cpus())
		return 0;

	base = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(base))
		return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(dev, irq, "Failed to get IRQ\n");

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");

	stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
	if (!stm_timer)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
			       IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
	if (ret)
		return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");

	ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
	if (ret)
		return ret;

	/*
	 * Each probed STM becomes a per CPU clockevent. Until we have
	 * probed as many instances as there are CPUs on the system,
	 * only a partial initialization is done here.
	 */
	ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
					      base, irq, clk,
					      stm_instances);
	if (ret)
		return ret;

	stm_instances++;

	/*
	 * Once the number of probed STMs for per CPU clockevents is
	 * equal to the number of available CPUs on the system, install
	 * the CPU hotplug callback to finish the initialization by
	 * registering the clockevents.
	 */
	if (stm_instances == num_possible_cpus()) {
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
					nxp_stm_clockevent_starting_cpu, NULL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct of_device_id nxp_stm_of_match[] = {
	{ .compatible = "nxp,s32g2-stm" },
	{ }
};
MODULE_DEVICE_TABLE(of, nxp_stm_of_match);

static struct platform_driver nxp_stm_probe = {
	.probe	= nxp_stm_timer_probe,
	.driver	= {
		.name		= "nxp-stm",
		.of_match_table	= nxp_stm_of_match,
	},
};
module_platform_driver(nxp_stm_probe);

MODULE_DESCRIPTION("NXP System Timer Module driver");
MODULE_LICENSE("GPL");