// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI DaVinci clocksource driver
 *
 * Copyright (C) 2019 Texas Instruments
 * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
 * (with tiny parts adapted from code by Kevin Hilman <khilman@baylibre.com>)
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <clocksource/timer-davinci.h>

#define DAVINCI_TIMER_REG_TIM12			0x10
#define DAVINCI_TIMER_REG_TIM34			0x14
#define DAVINCI_TIMER_REG_PRD12			0x18
#define DAVINCI_TIMER_REG_PRD34			0x1c
#define DAVINCI_TIMER_REG_TCR			0x20
#define DAVINCI_TIMER_REG_TGCR			0x24

#define DAVINCI_TIMER_TIMMODE_MASK		GENMASK(3, 2)
#define DAVINCI_TIMER_RESET_MASK		GENMASK(1, 0)
#define DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED	BIT(2)
#define DAVINCI_TIMER_UNRESET			GENMASK(1, 0)

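/*
 * Each timer half has a two-bit enable mode field in the TCR register:
 * bits 7:6 control TIM12 and bits 23:22 control TIM34.
 */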
#define DAVINCI_TIMER_ENAMODE_MASK		GENMASK(1, 0)
#define DAVINCI_TIMER_ENAMODE_DISABLED		0x00
#define DAVINCI_TIMER_ENAMODE_ONESHOT		BIT(0)
#define DAVINCI_TIMER_ENAMODE_PERIODIC		BIT(1)

#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM12	6
#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM34	22

#define DAVINCI_TIMER_MIN_DELTA			0x01
#define DAVINCI_TIMER_MAX_DELTA			0xfffffffe

#define DAVINCI_TIMER_CLKSRC_BITS		32

#define DAVINCI_TIMER_TGCR_DEFAULT \
		(DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED | DAVINCI_TIMER_UNRESET)

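/*
 * Clockevent state: the clock_event_device itself, the timer's MMIO base
 * and, on SoCs that provide one, the offset of the compare register used
 * to schedule events against the free-running counter.
 */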
struct davinci_clockevent {
	struct clock_event_device dev;
	void __iomem *base;
	unsigned int cmp_off;
};

/*
 * The sched_clock() read callback takes no context argument, so
 * davinci_timer_read_sched_clock() must be able to reach this state
 * through a file-scope variable.
 */
static struct {
	struct clocksource dev;
	void __iomem *base;
	unsigned int tim_off;
} davinci_clocksource;

static struct davinci_clockevent *
to_davinci_clockevent(struct clock_event_device *clockevent)
{
	return container_of(clockevent, struct davinci_clockevent, dev);
}

static unsigned int
davinci_clockevent_read(struct davinci_clockevent *clockevent,
			unsigned int reg)
{
	return readl_relaxed(clockevent->base + reg);
}

static void davinci_clockevent_write(struct davinci_clockevent *clockevent,
				     unsigned int reg, unsigned int val)
{
	writel_relaxed(val, clockevent->base + reg);
}

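/* Stop the clockevent half (TIM12) of the timer. */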
static void davinci_tim12_shutdown(void __iomem *base)
{
	unsigned int tcr;

	tcr = DAVINCI_TIMER_ENAMODE_DISABLED <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
	/*
	 * This function is only ever called if we're using both timer
	 * halves. In this case TIM34 runs in periodic mode and we must
	 * not modify it.
	 */
	tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;

	writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}

static void davinci_tim12_set_oneshot(void __iomem *base)
{
	unsigned int tcr;

	tcr = DAVINCI_TIMER_ENAMODE_ONESHOT <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
	/* Same as above. */
	tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;

	writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}

static int davinci_clockevent_shutdown(struct clock_event_device *dev)
{
	struct davinci_clockevent *clockevent;

	clockevent = to_davinci_clockevent(dev);

	davinci_tim12_shutdown(clockevent->base);

	return 0;
}

static int davinci_clockevent_set_oneshot(struct clock_event_device *dev)
{
	struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);

	davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);

	davinci_tim12_set_oneshot(clockevent->base);

	return 0;
}

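/*
 * Program the next event when TIM12 is used exclusively by the clockevent:
 * stop the timer, reset the counter, load the requested number of cycles
 * into the period register and restart in oneshot mode.
 */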
static int
davinci_clockevent_set_next_event_std(unsigned long cycles,
				      struct clock_event_device *dev)
{
	struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);

	davinci_clockevent_shutdown(dev);

	davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
	davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_PRD12, cycles);

	davinci_clockevent_set_oneshot(dev);

	return 0;
}

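/*
 * Program the next event when TIM12 also backs the clocksource and must
 * keep free-running: instead of reprogramming the period, write the
 * current count plus the requested number of cycles into the compare
 * register so that the interrupt fires on match.
 */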
static int
davinci_clockevent_set_next_event_cmp(unsigned long cycles,
				      struct clock_event_device *dev)
{
	struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
	unsigned int curr_time;

	curr_time = davinci_clockevent_read(clockevent,
					    DAVINCI_TIMER_REG_TIM12);
	davinci_clockevent_write(clockevent,
				 clockevent->cmp_off, curr_time + cycles);

	return 0;
}

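/*
 * Clockevent interrupt handler: if the device is no longer in oneshot
 * mode, stop TIM12 before dispatching the event.
 */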
static irqreturn_t davinci_timer_irq_timer(int irq, void *data)
{
	struct davinci_clockevent *clockevent = data;

	if (!clockevent_state_oneshot(&clockevent->dev))
		davinci_tim12_shutdown(clockevent->base);

	clockevent->dev.event_handler(&clockevent->dev);

	return IRQ_HANDLED;
}

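/*
 * sched_clock() read callback: return the raw count of whichever timer
 * half backs the clocksource. Marked notrace as it may be called from
 * tracing and instrumentation code.
 */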
static u64 notrace davinci_timer_read_sched_clock(void)
{
	return readl_relaxed(davinci_clocksource.base +
			     davinci_clocksource.tim_off);
}

static u64 davinci_clocksource_read(struct clocksource *dev)
{
	return davinci_timer_read_sched_clock();
}

/*
 * Standard use-case: we're using tim12 for clockevent and tim34 for
 * clocksource. The default is making the former run in oneshot mode
 * and the latter in periodic mode.
 */
static void davinci_clocksource_init_tim34(void __iomem *base)
{
	unsigned int tcr;

	tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
	tcr |= DAVINCI_TIMER_ENAMODE_ONESHOT <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;

	writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
	writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD34);
	writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}

/*
 * Special use-case on da830: the DSP may use tim34. We're using tim12 for
 * both clocksource and clockevent. We set tim12 to periodic and don't touch
 * tim34.
 */
static void davinci_clocksource_init_tim12(void __iomem *base)
{
	unsigned int tcr;

	tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
		DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;

	writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
	writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD12);
	writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}

static void davinci_timer_init(void __iomem *base)
{
	/* Set clock to internal mode and disable it. */
	writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TCR);
	/*
	 * Reset both 32-bit timers, set no prescaler for timer 34, set the
	 * timer to dual 32-bit unchained mode, unreset both 32-bit timers.
	 */
	writel_relaxed(DAVINCI_TIMER_TGCR_DEFAULT,
		       base + DAVINCI_TIMER_REG_TGCR);
	/* Init both counters to zero. */
	writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
	writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
}

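/*
 * Register TIM12 as the clockevent and either TIM34 or, when a compare
 * register offset is provided, TIM12 itself as the clocksource and
 * sched_clock source.
 *
 * A purely illustrative caller building its own configuration might look
 * roughly like the sketch below; the base address, size, interrupt numbers
 * and clock lookup are placeholders, not real platform data:
 *
 *	static const struct davinci_timer_cfg cfg = {
 *		.reg = DEFINE_RES_MEM(0x01c21400, SZ_1K),
 *		.irq = {
 *			DEFINE_RES_IRQ(TIMER_TINT12_IRQ),
 *			DEFINE_RES_IRQ(TIMER_TINT34_IRQ),
 *		},
 *	};
 *
 *	davinci_timer_register(clk, &cfg);
 */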
int __init davinci_timer_register(struct clk *clk,
				  const struct davinci_timer_cfg *timer_cfg)
{
	struct davinci_clockevent *clockevent;
	unsigned int tick_rate;
	void __iomem *base;
	int rv;

	rv = clk_prepare_enable(clk);
	if (rv) {
		pr_err("Unable to prepare and enable the timer clock\n");
		return rv;
	}

	if (!request_mem_region(timer_cfg->reg.start,
				resource_size(&timer_cfg->reg),
				"davinci-timer")) {
		pr_err("Unable to request memory region\n");
		rv = -EBUSY;
		goto exit_clk_disable;
	}

	base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
	if (!base) {
		pr_err("Unable to map the register range\n");
		rv = -ENOMEM;
		goto exit_mem_region;
	}

	davinci_timer_init(base);
	tick_rate = clk_get_rate(clk);

	clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
	if (!clockevent) {
		rv = -ENOMEM;
		goto exit_iounmap_base;
	}

	clockevent->dev.name = "tim12";
	clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
	clockevent->dev.cpumask = cpumask_of(0);
	clockevent->base = base;

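	/*
	 * With a compare register (the da830 case described above), the
	 * clockevent shares the free-running TIM12 counter with the
	 * clocksource; without one, TIM12 is stopped and reprogrammed for
	 * every event and gets dedicated oneshot/shutdown state handlers.
	 */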
	if (timer_cfg->cmp_off) {
		clockevent->cmp_off = timer_cfg->cmp_off;
		clockevent->dev.set_next_event =
				davinci_clockevent_set_next_event_cmp;
	} else {
		clockevent->dev.set_next_event =
				davinci_clockevent_set_next_event_std;
		clockevent->dev.set_state_oneshot =
				davinci_clockevent_set_oneshot;
		clockevent->dev.set_state_shutdown =
				davinci_clockevent_shutdown;
	}

	rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
			 davinci_timer_irq_timer, IRQF_TIMER,
			 "clockevent/tim12", clockevent);
	if (rv) {
		pr_err("Unable to request the clockevent interrupt\n");
		goto exit_free_clockevent;
	}

	davinci_clocksource.dev.rating = 300;
	davinci_clocksource.dev.read = davinci_clocksource_read;
	davinci_clocksource.dev.mask =
			CLOCKSOURCE_MASK(DAVINCI_TIMER_CLKSRC_BITS);
	davinci_clocksource.dev.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	davinci_clocksource.base = base;

	if (timer_cfg->cmp_off) {
		davinci_clocksource.dev.name = "tim12";
		davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM12;
		davinci_clocksource_init_tim12(base);
	} else {
		davinci_clocksource.dev.name = "tim34";
		davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM34;
		davinci_clocksource_init_tim34(base);
	}

	clockevents_config_and_register(&clockevent->dev, tick_rate,
					DAVINCI_TIMER_MIN_DELTA,
					DAVINCI_TIMER_MAX_DELTA);

	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
	if (rv) {
		pr_err("Unable to register clocksource\n");
		goto exit_free_irq;
	}

	sched_clock_register(davinci_timer_read_sched_clock,
			     DAVINCI_TIMER_CLKSRC_BITS, tick_rate);

	return 0;

exit_free_irq:
	free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
			clockevent);
exit_free_clockevent:
	kfree(clockevent);
exit_iounmap_base:
	iounmap(base);
exit_mem_region:
	release_mem_region(timer_cfg->reg.start,
			   resource_size(&timer_cfg->reg));
exit_clk_disable:
	clk_disable_unprepare(clk);
	return rv;
}

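/*
 * Devicetree entry point: collect the register range, interrupts and
 * functional clock from the timer node and hand them over to
 * davinci_timer_register().
 */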
static int __init of_davinci_timer_register(struct device_node *np)
{
	struct davinci_timer_cfg timer_cfg = { };
	struct clk *clk;
	int rv;

	rv = of_address_to_resource(np, 0, &timer_cfg.reg);
	if (rv) {
		pr_err("Unable to get the register range for timer\n");
		return rv;
	}

	rv = of_irq_to_resource_table(np, timer_cfg.irq,
				      DAVINCI_TIMER_NUM_IRQS);
	if (rv != DAVINCI_TIMER_NUM_IRQS) {
		pr_err("Unable to get the interrupts for timer\n");
		/* A short count from of_irq_to_resource_table() is not an error code. */
		return -EINVAL;
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Unable to get the timer clock\n");
		return PTR_ERR(clk);
	}

	rv = davinci_timer_register(clk, &timer_cfg);
	if (rv)
		clk_put(clk);

	return rv;
}
TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_register);