// SPDX-License-Identifier: GPL-2.0
/*
 * Ingenic SoCs TCU IRQ driver
 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>

#include <dt-bindings/clock/ingenic,tcu.h>

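/*
 * Per-CPU call_single_data_t used to run the clockevent handler on the CPU
 * that owns the timer, as the TCU interrupt may be delivered on another CPU.
 */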
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);

struct ingenic_soc_info {
	unsigned int num_channels;
};

struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

static struct ingenic_tcu *ingenic_tcu;

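/*
 * Read the 16-bit counter of the clocksource channel. This backs both the
 * clocksource ->read() callback and the raw sched_clock read function.
 */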
static u64 notrace ingenic_tcu_timer_read(void)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	unsigned int count;

	regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);

	return count;
}

static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}

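/*
 * Each per-CPU timer sits at index timer->cpu in the flexible "timers" array,
 * so container_of() over timers[timer->cpu] recovers the parent ingenic_tcu.
 */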
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}

static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
	return container_of(evt, struct ingenic_tcu_timer, cevt);
}

static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	return 0;
}

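/*
 * The TCU counters are 16 bits wide, hence the 0xffff limit: program the
 * "full" compare value, restart the counter from zero and re-enable the
 * channel for a one-shot event.
 */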
static int ingenic_tcu_cevt_set_next(unsigned long next,
				     struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	if (next > 0xffff)
		return -EINVAL;

	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

	return 0;
}

static void ingenic_per_cpu_event_handler(void *info)
{
	struct clock_event_device *cevt = (struct clock_event_device *) info;

	cevt->event_handler(cevt);
}

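/*
 * The timer interrupt is not guaranteed to fire on the CPU that owns the
 * clockevent. Stop the channel, then bounce the event handler to the owning
 * CPU through its per-CPU call_single_data_t.
 */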
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
	struct ingenic_tcu_timer *timer = dev_id;
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
	call_single_data_t *csd;

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	if (timer->cevt.event_handler) {
		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
		csd->info = (void *) &timer->cevt;
		csd->func = ingenic_per_cpu_event_handler;
		smp_call_function_single_async(timer->cpu, csd);
	}

	return IRQ_HANDLED;
}

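/*
 * Request one of the TCU clocks directly from the TCU clock provider, using
 * the channel number as the single cell of the clock specifier.
 */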
static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
{
	struct of_phandle_args args;

	args.np = np;
	args.args_count = 1;
	args.args[0] = id;

	return of_clk_get_from_provider(&args);
}

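/*
 * CPU hotplug "online" callback registered through cpuhp_setup_state(): it
 * runs on the CPU being brought up and configures that CPU's clockevent on
 * its dedicated TCU channel.
 */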
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}

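/*
 * Set up the free-running 16-bit clocksource on its reserved channel: reset
 * the channel configuration, let the counter wrap at 0xffff and register it
 * with the timekeeping core.
 */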
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}

static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4760-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};

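/*
 * Early boot entry point, invoked through TIMER_OF_DECLARE(). One TCU channel
 * per possible CPU is reserved for the clockevents, plus one more for the
 * clocksource; the remaining channels stay in pwm_channels_mask for the PWM
 * driver. The default mask may be overridden from the devicetree with the
 * "ingenic,pwm-channels-mask" property, for example (illustrative snippet
 * only, not taken from a real board file):
 *
 *	ingenic,pwm-channels-mask = <0xfc>;
 */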
static int __init ingenic_tcu_init(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
	const struct ingenic_soc_info *soc_info = id->data;
	struct ingenic_tcu_timer *timer;
	struct ingenic_tcu *tcu;
	struct regmap *map;
	unsigned int cpu;
	int ret, last_bit = -1;
	long rate;

	of_node_clear_flag(np, OF_POPULATED);

	map = device_node_to_regmap(np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
		      GFP_KERNEL);
	if (!tcu)
		return -ENOMEM;

	/*
	 * Enable all TCU channels for PWM use by default except channels 0/1,
	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
	 */
	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
					 num_possible_cpus() + 1);
	of_property_read_u32(np, "ingenic,pwm-channels-mask",
			     (u32 *)&tcu->pwm_channels_mask);

	/* Verify that we have at least num_possible_cpus() + 1 free channels */
	if (hweight8(tcu->pwm_channels_mask) >
			soc_info->num_channels - num_possible_cpus() + 1) {
		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
			tcu->pwm_channels_mask);
		ret = -EINVAL;
		goto err_free_ingenic_tcu;
	}

	tcu->map = map;
	tcu->np = np;
	ingenic_tcu = tcu;

	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		timer = &tcu->timers[cpu];

		timer->cpu = cpu;
		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
						    soc_info->num_channels,
						    last_bit + 1);
		last_bit = timer->channel;
	}

	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
					     soc_info->num_channels,
					     last_bit + 1);

	ret = ingenic_tcu_clocksource_init(np, tcu);
	if (ret) {
		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
		goto err_free_ingenic_tcu;
	}

	/* Setup clock events on each CPU core */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
				ingenic_tcu_setup_cevt, NULL);
	if (ret < 0) {
		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
		goto err_tcu_clocksource_cleanup;
	}

	/* Register the sched_clock at the end as there's no way to undo it */
	rate = clk_get_rate(tcu->cs_clk);
	sched_clock_register(ingenic_tcu_timer_read, 16, rate);

	return 0;

err_tcu_clocksource_cleanup:
	clocksource_unregister(&tcu->cs);
	clk_disable_unprepare(tcu->cs_clk);
	clk_put(tcu->cs_clk);
err_free_ingenic_tcu:
	kfree(tcu);
	return ret;
}

TIMER_OF_DECLARE(jz4740_tcu_intc,  "ingenic,jz4740-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4760_tcu_intc,  "ingenic,jz4760-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc,  "ingenic,jz4770-tcu",  ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc,   "ingenic,x1000-tcu",   ingenic_tcu_init);

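/*
 * Everything is already set up by the early init above; the platform driver
 * exists mainly to provide the suspend/resume hooks below, so probe only
 * records the global TCU state as driver data.
 */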
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, ingenic_tcu);

	return 0;
}

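/*
 * The TCU clocks were prepared with clk_prepare_enable() at init time and
 * stay prepared across suspend; only the non-sleeping clk_disable() /
 * clk_enable() operations are performed from these noirq callbacks.
 */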
static int ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;

	clk_disable(tcu->cs_clk);

	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		clk_disable(tcu->timers[cpu].clk);

	return 0;
}

static int ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
	int ret;

	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		ret = clk_enable(tcu->timers[cpu].clk);
		if (ret)
			goto err_timer_clk_disable;
	}

	ret = clk_enable(tcu->cs_clk);
	if (ret)
		goto err_timer_clk_disable;

	return 0;

err_timer_clk_disable:
	for (; cpu > 0; cpu--)
		clk_disable(tcu->timers[cpu - 1].clk);
	return ret;
}

static const struct dev_pm_ops ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq  = ingenic_tcu_resume,
};

static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name	= "ingenic-tcu-timer",
		.pm	= pm_sleep_ptr(&ingenic_tcu_pm_ops),
		.of_match_table = ingenic_tcu_of_match,
	},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);