// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MPIC timer driver
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>

#include <asm/mpic_timer.h>

#define FSL_GLOBAL_TIMER		0x1

/* Clock Ratio
 * Divide by 64 0x00000300
 * Divide by 32 0x00000200
 * Divide by 16 0x00000100
 * Divide by  8 0x00000000 (Hardware default div)
 */
#define MPIC_TIMER_TCR_CLKDIV		0x00000300

#define MPIC_TIMER_TCR_ROVR_OFFSET	24

#define TIMER_STOP			0x80000000
#define GTCCR_TOG			0x80000000
#define TIMERS_PER_GROUP		4
#define MAX_TICKS			(~0U >> 1)
#define MAX_TICKS_CASCADE		(~0U)
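/*
 * The "idle" bitmap in struct timer_group_priv stores timers in reverse
 * order: timer "num" occupies bit (TIMERS_PER_GROUP - 1 - num), matching
 * the reverse allocation order used in get_timer().
 */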
#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))

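/*
 * Register block for one global timer: each register sits on a 16-byte
 * boundary (hence the res[3] padding), so priv->regs[num] addresses the
 * GTCCR/GTBCR/GTVPR/GTDR set of timer "num" within the group.
 */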
struct timer_regs {
	u32	gtccr;
	u32	res0[3];
	u32	gtbcr;
	u32	res1[3];
	u32	gtvpr;
	u32	res2[3];
	u32	gtdr;
	u32	res3[3];
};

struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* cascade map */
	unsigned int timer_num;		/* cascade control timer */
};

struct timer_group_priv {
	struct timer_regs __iomem	*regs;
	struct mpic_timer		timer[TIMERS_PER_GROUP];
	struct list_head		node;
	unsigned int			timerfreq;
	unsigned int			idle;
	unsigned int			flags;
	spinlock_t			lock;
	void __iomem			*group_tcr;
};

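/*
 * Supported cascade pairs.  For each entry:
 *   tcr_value   - CASC/ROVR value programmed into the group TCR for the pair;
 *   cascade_map - idle-bitmap bits covering both timers of the pair;
 *   timer_num   - the timer handed out to the caller, which is programmed
 *                 with the upper part of the tick count.
 */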
static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};

static LIST_HEAD(timer_group_list);

static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, time64_t *time)
{
	*time = (u64)div_u64(ticks, priv->timerfreq);
}

/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		time64_t time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */

	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time > max_value)
		return -EINVAL;

	*ticks = (u64)time * (u64)priv->timerfreq;

	return 0;
}

/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}

static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

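	/*
	 * Split the tick count across the cascaded pair: timer "num" is
	 * loaded with ticks / MAX_TICKS_CASCADE (kept stopped for now),
	 * timer "num - 1" with the remainder.
	 */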
	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}

static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
					u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}

static struct mpic_timer *get_timer(time64_t time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
					ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}

/**
 * mpic_start_timer - start hardware timer
 * @handle: the timer to be started.
 *
 * Start the timer. The ->fn(->dev) callback registered via
 * mpic_request_timer() is invoked from the hardware interrupt once the
 * requested time has elapsed.
 */
void mpic_start_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);

/**
 * mpic_stop_timer - stop hardware timer
 * @handle: the timer to be stopped
 *
 * Stop the timer and clear its current count. A running timer keeps
 * raising its interrupt periodically until it is stopped.
 */
void mpic_stop_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		out_be32(&priv->regs[handle->num].gtccr, 0);
		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
	} else {
		out_be32(&priv->regs[handle->num].gtccr, 0);
	}
}
EXPORT_SYMBOL(mpic_stop_timer);

/**
 * mpic_get_remain_time - get timer remaining time
 * @handle: the timer to be queried.
 * @time: returns the remaining time, in seconds
 *
 * Query how much time is left before the timer expires.
 */
void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	u64 ticks;
	u32 tmp_ticks;

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
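		/*
		 * Combine both counters of the cascaded pair:
		 * remaining ticks = upper counter * MAX_TICKS_CASCADE + lower counter.
		 */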
		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
		tmp_ticks &= ~GTCCR_TOG;
		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
		ticks += tmp_ticks;
	} else {
		ticks = in_be32(&priv->regs[handle->num].gtccr);
		ticks &= ~GTCCR_TOG;
	}

	convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);

/**
 * mpic_free_timer - free hardware timer
 * @handle: the timer to be removed.
 *
 * Stop the timer, release its interrupt and mark it idle again.
 *
 * Note: cannot be used in interrupt context.
 */
void mpic_free_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	struct cascade_priv *casc_priv;
	unsigned long flags;

	mpic_stop_timer(handle);

	casc_priv = priv->timer[handle->num].cascade_handle;

	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (casc_priv) {
		u32 tcr;
		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
					MPIC_TIMER_TCR_ROVR_OFFSET);
		clrbits32(priv->group_tcr, tcr);
		priv->idle |= casc_priv->cascade_map;
		priv->timer[handle->num].cascade_handle = NULL;
	} else {
		priv->idle |= TIMER_OFFSET(handle->num);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);

/**
 * mpic_request_timer - get a hardware timer
 * @fn: interrupt handler function
 * @dev: cookie passed to @fn (the dev_id for the interrupt)
 * @time: time until the timer expires, in seconds
 *
 * Allocate an idle timer, program it with @time and request its
 * interrupt. The timer is returned stopped; call mpic_start_timer()
 * to start it.
 *
 * Returns a timer handle on success, or NULL on failure.
 */
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
				      time64_t time)
{
	struct mpic_timer *allocated_timer;
	int ret;

	if (list_empty(&timer_group_list))
		return NULL;

	if (time < 0)
		return NULL;

	allocated_timer = get_timer(time);
	if (!allocated_timer)
		return NULL;

	ret = request_irq(allocated_timer->irq, fn,
			IRQF_TRIGGER_LOW, "global-timer", dev);
	if (ret) {
		mpic_free_timer(allocated_timer);
		return NULL;
	}

	allocated_timer->dev = dev;

	return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);
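
/*
 * Minimal usage sketch (illustrative only; my_timer_handler, my_dev and
 * the 30-second timeout are hypothetical names and values, not part of
 * this driver):
 *
 *	static irqreturn_t my_timer_handler(int irq, void *dev)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct mpic_timer *t;
 *
 *	t = mpic_request_timer(my_timer_handler, my_dev, 30);
 *	if (!t)
 *		return -ENODEV;
 *	mpic_start_timer(t);
 *
 *	... and later, from non-interrupt context:
 *
 *	mpic_free_timer(t);
 */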

static int __init timer_group_get_freq(struct device_node *np,
			struct timer_group_priv *priv)
{
	u32 div;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		struct device_node *dn;

		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
		if (dn) {
			of_property_read_u32(dn, "clock-frequency",
					&priv->timerfreq);
			of_node_put(dn);
		}
	}

	if (priv->timerfreq <= 0)
		return -EINVAL;

	if (priv->flags & FSL_GLOBAL_TIMER) {
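		/*
		 * MPIC_TIMER_TCR_CLKDIV (0x300) selects divide-by-64:
		 * (1 << (0x300 >> 8)) * 8 == 64.  This matches the CLKDIV
		 * value programmed into the group TCR in timer_group_init().
		 */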
		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
		priv->timerfreq /= div;
	}

	return 0;
}

static int __init timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

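	/* "fsl,available-ranges" is a list of <offset count> cell pairs. */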
	p = of_get_property(np, "fsl,available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}

static void __init timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}

static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}

static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};

static int __init mpic_timer_init(void)
{
	struct device_node *np = NULL;

	for_each_matching_node(np, mpic_timer_ids)
		timer_group_init(np);

	register_syscore_ops(&mpic_timer_syscore_ops);

	if (list_empty(&timer_group_list))
		return -ENODEV;

	return 0;
}
subsys_initcall(mpic_timer_init);
558