// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018,2021-2025 NXP
 *
 * NXP System Timer Module:
 *
 * STM supports commonly required system and application software
 * timing functions. STM includes a 32-bit count-up timer and four
 * 32-bit compare channels with a separate interrupt source for each
 * channel. The timer is driven by the STM module clock divided by an
 * 8-bit prescale value (1 to 256). It has the ability to stop the
 * timer in Debug mode.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/units.h>

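/*
 * Register map note (derived from the offsets below): the control
 * register (CR) and the counter (CNT) are followed by four identical
 * channel groups, each with a channel control (CCRn), interrupt flag
 * (CIRn) and compare (CMPn) register, at a 0x10 stride from 0x10.
 */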
#define STM_CR(__base)          (__base)

#define STM_CR_TEN              BIT(0)
#define STM_CR_FRZ              BIT(1)
#define STM_CR_CPS_OFFSET       8u
#define STM_CR_CPS_MASK         GENMASK(15, STM_CR_CPS_OFFSET)

#define STM_CNT(__base)         ((__base) + 0x04)

#define STM_CCR0(__base)        ((__base) + 0x10)
#define STM_CCR1(__base)        ((__base) + 0x20)
#define STM_CCR2(__base)        ((__base) + 0x30)
#define STM_CCR3(__base)        ((__base) + 0x40)

#define STM_CCR_CEN             BIT(0)

#define STM_CIR0(__base)        ((__base) + 0x14)
#define STM_CIR1(__base)        ((__base) + 0x24)
#define STM_CIR2(__base)        ((__base) + 0x34)
#define STM_CIR3(__base)        ((__base) + 0x44)

#define STM_CIR_CIF             BIT(0)

#define STM_CMP0(__base)        ((__base) + 0x18)
#define STM_CMP1(__base)        ((__base) + 0x28)
#define STM_CMP2(__base)        ((__base) + 0x38)
#define STM_CMP3(__base)        ((__base) + 0x48)

#define STM_ENABLE_MASK         (STM_CR_FRZ | STM_CR_TEN)

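/*
 * struct stm_timer - per STM instance state (fields as used below)
 * @base:    MMIO base of the STM instance
 * @rate:    input clock rate in Hz
 * @delta:   last programmed clockevent delta, reused by the interrupt
 *           handler to re-arm the comparator in periodic mode
 * @counter: counter value saved across clocksource suspend/resume
 * @ced:     clockevent device backed by channel 0 of this instance
 * @cs:      clocksource backed by the free-running counter
 * @refcnt:  users of the module (clocksource and/or clockevent); the
 *           counter runs only while the refcount is non-zero
 */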
struct stm_timer {
        void __iomem *base;
        unsigned long rate;
        unsigned long delta;
        unsigned long counter;
        struct clock_event_device ced;
        struct clocksource cs;
        atomic_t refcnt;
};

static DEFINE_PER_CPU(struct stm_timer *, stm_timers);

static struct stm_timer *stm_sched_clock;

/*
 * Global counter of initialized STM instances
 */
static int stm_instances;

/*
 * This global lock is used to prevent race conditions on stm_instances
 * in case the driver is using the ASYNC probe option
 */
static DEFINE_MUTEX(stm_instances_lock);

DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))

static struct stm_timer *cs_to_stm(struct clocksource *cs)
{
        return container_of(cs, struct stm_timer, cs);
}

static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
{
        return container_of(ced, struct stm_timer, ced);
}

static u64 notrace nxp_stm_read_sched_clock(void)
{
        return readl(STM_CNT(stm_sched_clock->base));
}

static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
{
        return readl(STM_CNT(stm_timer->base));
}

static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
{
        writel(cnt, STM_CNT(stm_timer->base));
}

static u64 nxp_stm_clocksource_read(struct clocksource *cs)
{
        struct stm_timer *stm_timer = cs_to_stm(cs);

        return (u64)nxp_stm_clocksource_getcnt(stm_timer);
}

static void nxp_stm_module_enable(struct stm_timer *stm_timer)
{
        u32 reg;

        reg = readl(STM_CR(stm_timer->base));

        reg |= STM_ENABLE_MASK;

        writel(reg, STM_CR(stm_timer->base));
}

static void nxp_stm_module_disable(struct stm_timer *stm_timer)
{
        u32 reg;

        reg = readl(STM_CR(stm_timer->base));

        reg &= ~STM_ENABLE_MASK;

        writel(reg, STM_CR(stm_timer->base));
}

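/*
 * The STM counter is shared by the clocksource and the per-CPU
 * clockevent sitting on the same instance: keep the module enabled
 * as long as at least one of them is in use.
 */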
static void nxp_stm_module_put(struct stm_timer *stm_timer)
{
        if (atomic_dec_and_test(&stm_timer->refcnt))
                nxp_stm_module_disable(stm_timer);
}

static void nxp_stm_module_get(struct stm_timer *stm_timer)
{
        if (atomic_inc_return(&stm_timer->refcnt) == 1)
                nxp_stm_module_enable(stm_timer);
}

static int nxp_stm_clocksource_enable(struct clocksource *cs)
{
        struct stm_timer *stm_timer = cs_to_stm(cs);

        nxp_stm_module_get(stm_timer);

        return 0;
}

static void nxp_stm_clocksource_disable(struct clocksource *cs)
{
        struct stm_timer *stm_timer = cs_to_stm(cs);

        nxp_stm_module_put(stm_timer);
}

static void nxp_stm_clocksource_suspend(struct clocksource *cs)
{
        struct stm_timer *stm_timer = cs_to_stm(cs);

        nxp_stm_clocksource_disable(cs);
        stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
}

static void nxp_stm_clocksource_resume(struct clocksource *cs)
{
        struct stm_timer *stm_timer = cs_to_stm(cs);

        nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
        nxp_stm_clocksource_enable(cs);
}

static void __init devm_clocksource_unregister(void *data)
{
        struct stm_timer *stm_timer = data;

        clocksource_unregister(&stm_timer->cs);
}

static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
                                           const char *name, void __iomem *base, struct clk *clk)
{
        int ret;

        stm_timer->base = base;
        stm_timer->rate = clk_get_rate(clk);

        stm_timer->cs.name = name;
        stm_timer->cs.rating = 460;
        stm_timer->cs.read = nxp_stm_clocksource_read;
        stm_timer->cs.enable = nxp_stm_clocksource_enable;
        stm_timer->cs.disable = nxp_stm_clocksource_disable;
        stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
        stm_timer->cs.resume = nxp_stm_clocksource_resume;
        stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
        stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
        stm_timer->cs.owner = THIS_MODULE;

        ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
        if (ret)
                return ret;

        /*
         * devm_add_action_or_reset() already runs the unregister
         * action on failure, so there is nothing left to undo here.
         */
        ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
        if (ret)
                return ret;

        stm_sched_clock = stm_timer;

        sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);

        dev_dbg(dev, "Registered clocksource %s\n", name);

        return 0;
}

static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
{
        return readl(STM_CNT(stm_timer->base));
}

static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
{
        writel(0, STM_CCR0(stm_timer->base));
}

static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
{
        writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
}

static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
{
        struct stm_timer *stm_timer = ced_to_stm(ced);

        nxp_stm_clockevent_disable(stm_timer);

        return 0;
}

static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
{
        struct stm_timer *stm_timer = ced_to_stm(ced);
        u32 val;

        nxp_stm_clockevent_disable(stm_timer);

        stm_timer->delta = delta;

        val = nxp_stm_clockevent_read_counter(stm_timer) + delta;

        writel(val, STM_CMP0(stm_timer->base));

        /*
         * The counter is shared across the channels and cannot be
         * stopped while we are setting the next event. If the delta
         * is very small it is possible the counter increases above
         * the computed 'val'. The min_delta value specified when
         * registering the clockevent will prevent that. The second
         * case is if the counter wraps while we compute 'val' and
         * before writing the comparator register. We read the counter
         * again, check whether we went back in time and abort the
         * timer with -ETIME.
         */
        if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
                return -ETIME;

        nxp_stm_clockevent_enable(stm_timer);

        return 0;
}

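/*
 * Periodic mode is emulated: the first event is programmed here and
 * the interrupt handler re-arms the comparator with the stored delta
 * on every expiry.
 */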
static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
{
        struct stm_timer *stm_timer = ced_to_stm(ced);

        return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
}

static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
{
        struct stm_timer *stm_timer = ced_to_stm(ced);

        nxp_stm_module_put(stm_timer);
}

static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
{
        struct stm_timer *stm_timer = ced_to_stm(ced);

        nxp_stm_module_get(stm_timer);
}

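/*
 * Only fills in the clockevent, takes a reference on the module and
 * stashes the timer in the per-CPU pointer; the actual registration
 * is deferred to the CPU hotplug callback below, once every CPU has
 * an STM instance assigned.
 */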
static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
                                                  const char *name, void __iomem *base, int irq,
                                                  struct clk *clk, int cpu)
{
        stm_timer->base = base;
        stm_timer->rate = clk_get_rate(clk);

        stm_timer->ced.name = name;
        stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
        stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
        stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
        stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
        stm_timer->ced.resume = nxp_stm_clockevent_resume;
        stm_timer->ced.cpumask = cpumask_of(cpu);
        stm_timer->ced.rating = 460;
        stm_timer->ced.irq = irq;
        stm_timer->ced.owner = THIS_MODULE;

        per_cpu(stm_timers, cpu) = stm_timer;

        nxp_stm_module_get(stm_timer);

        dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);

        return 0;
}

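/*
 * CPU hotplug callback: runs on the CPU being brought up, pins the
 * channel interrupt to that CPU and registers its clockevent.
 */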
static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
{
        struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
        int ret;

        if (WARN_ON(!stm_timer))
                return -EFAULT;

        ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
        if (ret)
                return ret;

        /*
         * Timing measurements show that reading the counter register
         * and writing the comparator register take at most 1100 ns at
         * a 133 MHz clock rate. The timer must be programmed further
         * ahead than that, so to be safe the minimum delta is set to
         * 2000 ns, i.e. 2 us.
         *
         * minimum ticks = (rate / MICRO) * 2
         */
        clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
                                        (stm_timer->rate / MICRO) * 2, ULONG_MAX);

        return 0;
}

static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
{
        struct stm_timer *stm_timer = dev_id;
        struct clock_event_device *ced = &stm_timer->ced;
        u32 val;

        /*
         * The interrupt is shared across the channels of the module,
         * but this instance only runs channel 0, so there is no need
         * to check the interrupt flags first: clear the channel 0
         * interrupt flag directly.
         */
        writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));

        /*
         * Update STM_CMP value using the counter value
         */
        val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;

        writel(val, STM_CMP0(stm_timer->base));

        /*
         * The STM hardware has no native one-shot mode: the channel
         * keeps matching and raising interrupts as the counter runs.
         * In ONESHOT mode the software must disable the channel to
         * stop this interrupt loop.
         */
        if (likely(clockevent_state_oneshot(ced)))
                nxp_stm_clockevent_disable(stm_timer);

        ced->event_handler(ced);

        return IRQ_HANDLED;
}

static int __init nxp_stm_timer_probe(struct platform_device *pdev)
{
        struct stm_timer *stm_timer;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        const char *name = of_node_full_name(np);
        struct clk *clk;
        void __iomem *base;
        int irq, ret;

        /*
         * The device tree can describe multiple STM nodes, which makes
         * this driver a good candidate for asynchronous probing. It is
         * still unclear whether the time framework correctly handles
         * parallel loading of the timers, but at least this driver is
         * ready to support the option.
         */
        guard(stm_instances)(&stm_instances_lock);

        /*
         * The S32Gx are SoCs featuring a diverse set of cores. Linux
         * is expected to run on Cortex-A53 cores, while other
         * software stacks will operate on Cortex-M cores. The number
         * of STM instances has been sized to include at most one
         * instance per core.
         *
         * As we need a clocksource and a clockevent per cpu, we
         * simply initialize a clocksource per cpu along with the
         * clockevent, which makes the resulting code simpler.
         *
         * However, if the device tree describes more STM instances
         * than the number of cores, the extra ones are ignored.
         */
        if (stm_instances >= num_possible_cpus())
                return 0;

        base = devm_of_iomap(dev, np, 0, NULL);
        if (IS_ERR(base))
                return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return dev_err_probe(dev, irq, "Failed to get IRQ\n");

        clk = devm_clk_get_enabled(dev, NULL);
        if (IS_ERR(clk))
                return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");

        stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
        if (!stm_timer)
                return -ENOMEM;

        ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
                               IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
        if (ret)
                return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");

        ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
        if (ret)
                return ret;

        /*
         * Each probed STM becomes a per-CPU clockevent. Until as many
         * instances have been probed as there are CPUs on the system,
         * only a partial initialization is done here.
         */
        ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
                                              base, irq, clk,
                                              stm_instances);
        if (ret)
                return ret;

        stm_instances++;

        /*
         * Once the number of probed STM instances matches the number
         * of available CPUs on the system, install the CPU hotplug
         * callback to finish the initialization by registering the
         * clockevents.
         */
        if (stm_instances == num_possible_cpus()) {
                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
                                        nxp_stm_clockevent_starting_cpu, NULL);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static const struct of_device_id nxp_stm_of_match[] = {
        { .compatible = "nxp,s32g2-stm" },
        { }
};
MODULE_DEVICE_TABLE(of, nxp_stm_of_match);

static struct platform_driver nxp_stm_probe = {
        .probe = nxp_stm_timer_probe,
        .driver = {
                .name = "nxp-stm",
                .of_match_table = nxp_stm_of_match,
        },
};
module_platform_driver(nxp_stm_probe);

MODULE_DESCRIPTION("NXP System Timer Module driver");
MODULE_LICENSE("GPL");