// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) Copyright 2009 Intel Corporation
 * Author: Jacob Pan (jacob.jun.pan@intel.com)
 *
 * Shared with ARM platforms, Jamie Iles, Picochip 2011
 *
 * Support for the Synopsys DesignWare APB Timers.
 */
#include <linux/dw_apb_timer.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>

#define APBT_MIN_PERIOD			4
#define APBT_MIN_DELTA_USEC		200

#define APBTMR_N_LOAD_COUNT		0x00
#define APBTMR_N_CURRENT_VALUE		0x04
#define APBTMR_N_CONTROL		0x08
#define APBTMR_N_EOI			0x0c
#define APBTMR_N_INT_STATUS		0x10

#define APBTMRS_INT_STATUS		0xa0
#define APBTMRS_EOI			0xa4
#define APBTMRS_RAW_INT_STATUS		0xa8
#define APBTMRS_COMP_VERSION		0xac

#define APBTMR_CONTROL_ENABLE		(1 << 0)
/* 1: periodic, 0: free-running. */
#define APBTMR_CONTROL_MODE_PERIODIC	(1 << 1)
#define APBTMR_CONTROL_INT		(1 << 2)

static inline struct dw_apb_clock_event_device *
ced_to_dw_apb_ced(struct clock_event_device *evt)
{
	return container_of(evt, struct dw_apb_clock_event_device, ced);
}

static inline struct dw_apb_clocksource *
clocksource_to_dw_apb_clocksource(struct clocksource *cs)
{
	return container_of(cs, struct dw_apb_clocksource, cs);
}

static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
{
	return readl(timer->base + offs);
}

static inline void apbt_writel(struct dw_apb_timer *timer, u32 val,
			unsigned long offs)
{
	writel(val, timer->base + offs);
}

static inline u32 apbt_readl_relaxed(struct dw_apb_timer *timer, unsigned long offs)
{
	return readl_relaxed(timer->base + offs);
}

static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
			unsigned long offs)
{
	writel_relaxed(val, timer->base + offs);
}

static void apbt_disable_int(struct dw_apb_timer *timer)
{
	u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);

	/* APBTMR_CONTROL_INT is the interrupt mask bit: setting it masks the timer interrupt. */
	ctrl |= APBTMR_CONTROL_INT;
	apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}

/**
 * dw_apb_clockevent_pause() - stop the clock_event_device from running
 *
 * @dw_ced:	The APB clock to stop generating events.
 */
void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
{
	disable_irq(dw_ced->timer.irq);
	apbt_disable_int(&dw_ced->timer);
}

static void apbt_eoi(struct dw_apb_timer *timer)
{
	apbt_readl_relaxed(timer, APBTMR_N_EOI);
}

static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
{
	struct clock_event_device *evt = data;
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);

	if (!evt->event_handler) {
		pr_info("Spurious APBT timer interrupt %d\n", irq);
		return IRQ_NONE;
	}

	if (dw_ced->eoi)
		dw_ced->eoi(&dw_ced->timer);

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static void apbt_enable_int(struct dw_apb_timer *timer)
{
	u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
	/* clear pending intr */
	apbt_readl(timer, APBTMR_N_EOI);
	ctrl &= ~APBTMR_CONTROL_INT;
	apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
}

static int apbt_shutdown(struct clock_event_device *evt)
{
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
	u32 ctrl;

	pr_debug("%s CPU %d state=shutdown\n", __func__,
		 cpumask_first(evt->cpumask));

	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	return 0;
}

static int apbt_set_oneshot(struct clock_event_device *evt)
{
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
	u32 ctrl;

	pr_debug("%s CPU %d state=oneshot\n", __func__,
		 cpumask_first(evt->cpumask));

	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
	/*
	 * Set free-running mode.  In this mode the timer reloads the
	 * maximum timeout, which leaves ample time (2^32 cycles, roughly
	 * 3 minutes with a 25MHz clock) to program the next event,
	 * thereby emulating one-shot mode.
	 */
	ctrl &= ~APBTMR_CONTROL_ENABLE;
	ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;

	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	/* write again to set free-running mode */
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);

	/*
	 * Per the DW APB databook (p. 46), load the counter with all 1s
	 * before starting free-running mode.
	 */
	apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
	ctrl &= ~APBTMR_CONTROL_INT;
	ctrl |= APBTMR_CONTROL_ENABLE;
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	return 0;
}

static int apbt_set_periodic(struct clock_event_device *evt)
{
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
	unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
	u32 ctrl;

	pr_debug("%s CPU %d state=periodic\n", __func__,
		 cpumask_first(evt->cpumask));

	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
	ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	/*
	 * Per the DW APB databook (p. 46), the timer has to be disabled
	 * before the load count is written, otherwise the update may not
	 * synchronise correctly.
	 */
	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	udelay(1);
	pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
	apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
	ctrl |= APBTMR_CONTROL_ENABLE;
	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	return 0;
}

static int apbt_resume(struct clock_event_device *evt)
{
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);

	pr_debug("%s CPU %d state=resume\n", __func__,
		 cpumask_first(evt->cpumask));

	apbt_enable_int(&dw_ced->timer);
	return 0;
}

static int apbt_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	u32 ctrl;
	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);

	/* Disable timer */
	ctrl = apbt_readl_relaxed(&dw_ced->timer, APBTMR_N_CONTROL);
	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
	/* write new count */
	apbt_writel_relaxed(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
	ctrl |= APBTMR_CONTROL_ENABLE;
	apbt_writel_relaxed(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);

	return 0;
}

/**
 * dw_apb_clockevent_init() - use an APB timer as a clock_event_device
 *
 * @cpu:	The CPU the events will be targeted at.
 * @name:	The name used for the timer and the IRQ for it.
 * @rating:	The rating to give the timer.
 * @base:	I/O base for the timer registers.
 * @irq:	The interrupt number to use for the timer.
 * @freq:	The frequency that the timer counts at.
 *
 * This creates a clock_event_device for use with the generic clockevents
 * layer but does not start or register it.  That should be done with
 * dw_apb_clockevent_register() as the next step.  If this is the first time
 * it has been called for a timer then the IRQ will be requested; if not, the
 * IRQ will just be re-enabled, which allows CPU hotplug to avoid repeatedly
 * requesting and releasing the IRQ.
 */
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
		       void __iomem *base, int irq, unsigned long freq)
{
	struct dw_apb_clock_event_device *dw_ced =
		kzalloc(sizeof(*dw_ced), GFP_KERNEL);
	int err;

	if (!dw_ced)
		return NULL;

	dw_ced->timer.base = base;
	dw_ced->timer.irq = irq;
	dw_ced->timer.freq = freq;

	clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD);
	dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff,
						       &dw_ced->ced);
	dw_ced->ced.max_delta_ticks = 0x7fffffff;
	dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
	dw_ced->ced.min_delta_ticks = 5000;
	dw_ced->ced.cpumask = cpumask_of(cpu);
	dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
				CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	dw_ced->ced.set_state_shutdown = apbt_shutdown;
	dw_ced->ced.set_state_periodic = apbt_set_periodic;
	dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
	dw_ced->ced.set_state_oneshot_stopped = apbt_shutdown;
	dw_ced->ced.tick_resume = apbt_resume;
	dw_ced->ced.set_next_event = apbt_next_event;
	dw_ced->ced.irq = dw_ced->timer.irq;
	dw_ced->ced.rating = rating;
	dw_ced->ced.name = name;

	dw_ced->eoi = apbt_eoi;
	err = request_irq(irq, dw_apb_clockevent_irq,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dw_ced->ced.name, &dw_ced->ced);
	if (err) {
		pr_err("failed to request timer irq\n");
		kfree(dw_ced);
		dw_ced = NULL;
	}

	return dw_ced;
}

/**
 * dw_apb_clockevent_resume() - resume a clock that has been paused.
 *
 * @dw_ced:	The APB clock to resume.
 */
void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
{
	enable_irq(dw_ced->timer.irq);
}

/**
 * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
 *
 * @dw_ced:	The APB clock to stop generating the events.
 */
void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
{
	free_irq(dw_ced->timer.irq, &dw_ced->ced);
}

/**
 * dw_apb_clockevent_register() - register the clock with the generic layer
 *
 * @dw_ced:	The APB clock to register as a clock_event_device.
 */
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced)
{
	apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL);
	clockevents_register_device(&dw_ced->ced);
	apbt_enable_int(&dw_ced->timer);
}
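
/*
 * Illustrative usage sketch (not part of this driver): how a platform timer
 * setup path might wire one APB timer channel up as a clock_event_device,
 * following the init-then-register flow described above.  The CPU, rating,
 * IRQ number and clock frequency used here are hypothetical placeholders,
 * not values taken from any real board.
 *
 *	static int __init example_apbt_clockevent_setup(void __iomem *base,
 *							int irq)
 *	{
 *		struct dw_apb_clock_event_device *ced;
 *
 *		ced = dw_apb_clockevent_init(0, "example_apbt0", 300, base,
 *					     irq, 25000000);
 *		if (!ced)
 *			return -ENOMEM;
 *
 *		dw_apb_clockevent_register(ced);
 *		return 0;
 *	}
 *
 * dw_apb_clockevent_pause()/dw_apb_clockevent_resume() can then bracket a
 * suspend cycle, and dw_apb_clockevent_stop() releases the IRQ on teardown.
 */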

/**
 * dw_apb_clocksource_start() - start the clocksource counting.
 *
 * @dw_cs:	The clocksource to start.
 *
 * This is used to start the clocksource before registration and can be used
 * to enable calibration of timers.
 */
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
{
	/*
	 * Start counting down from 0xffff_ffff.  This is done by disabling
	 * the timer, loading the initial count with ~0 and then re-enabling
	 * the timer with its interrupt masked.
	 */
	u32 ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);

	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
	apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT);
	/* enable, mask interrupt */
	ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
	ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
	apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
	/* read it once to get cached counter value initialized */
	dw_apb_clocksource_read(dw_cs);
}

static u64 __apbt_read_clocksource(struct clocksource *cs)
{
	u32 current_count;
	struct dw_apb_clocksource *dw_cs =
		clocksource_to_dw_apb_clocksource(cs);

	current_count = apbt_readl_relaxed(&dw_cs->timer,
					APBTMR_N_CURRENT_VALUE);

	return (u64)~current_count;
}

static void apbt_restart_clocksource(struct clocksource *cs)
{
	struct dw_apb_clocksource *dw_cs =
		clocksource_to_dw_apb_clocksource(cs);

	dw_apb_clocksource_start(dw_cs);
}

/**
 * dw_apb_clocksource_init() - use an APB timer as a clocksource.
 *
 * @rating:	The rating to give the clocksource.
 * @name:	The name for the clocksource.
 * @base:	The I/O base for the timer registers.
 * @freq:	The frequency that the timer counts at.
 *
 * This creates a clocksource using an APB timer but does not yet register it
 * with the clocksource system.  This should be done with
 * dw_apb_clocksource_register() as the next step.
 */
struct dw_apb_clocksource *
dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
			unsigned long freq)
{
	struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);

	if (!dw_cs)
		return NULL;

	dw_cs->timer.base = base;
	dw_cs->timer.freq = freq;
	dw_cs->cs.name = name;
	dw_cs->cs.rating = rating;
	dw_cs->cs.read = __apbt_read_clocksource;
	dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
	dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	dw_cs->cs.resume = apbt_restart_clocksource;

	return dw_cs;
}

/**
 * dw_apb_clocksource_register() - register the APB clocksource.
 *
 * @dw_cs:	The clocksource to register.
 */
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
{
	clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
}
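
/*
 * Illustrative usage sketch (not part of this driver): how a second APB
 * timer channel might be exposed as the system clocksource, following the
 * init/start/register flow described above.  The rating, name and clock
 * frequency here are hypothetical placeholders.
 *
 *	static int __init example_apbt_clocksource_setup(void __iomem *base)
 *	{
 *		struct dw_apb_clocksource *cs;
 *
 *		cs = dw_apb_clocksource_init(300, "example_apbt1", base,
 *					     25000000);
 *		if (!cs)
 *			return -ENOMEM;
 *
 *		dw_apb_clocksource_start(cs);
 *		dw_apb_clocksource_register(cs);
 *		return 0;
 *	}
 */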

/**
 * dw_apb_clocksource_read() - read the current value of a clocksource.
 *
 * @dw_cs:	The clocksource to read.
 */
u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
{
	return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
}
417