xref: /linux/drivers/clocksource/timer-tegra.c (revision cdd30ebb1b9f36159d66f088b61aee264e649d7a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 */

#define pr_fmt(fmt)	"tegra-timer: " fmt

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>

#include "timer-of.h"

#define RTC_SECONDS		0x08
#define RTC_SHADOW_SECONDS	0x0c
#define RTC_MILLISECONDS	0x10

#define TIMERUS_CNTR_1US	0x10
#define TIMERUS_USEC_CFG	0x14
#define TIMERUS_CNTR_FREEZE	0x4c

#define TIMER_PTV		0x0
#define TIMER_PTV_EN		BIT(31)
#define TIMER_PTV_PER		BIT(30)
#define TIMER_PCR		0x4
#define TIMER_PCR_INTR_CLR	BIT(30)

#define TIMER1_BASE		0x00
#define TIMER2_BASE		0x08
#define TIMER3_BASE		0x50
#define TIMER4_BASE		0x58
#define TIMER10_BASE		0x90

#define TIMER1_IRQ_IDX		0
#define TIMER10_IRQ_IDX		10

#define TIMER_1MHz		1000000

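/*
 * usec_config caches the TIMERUS_USEC_CFG value so that it can be
 * restored on resume; timer_reg_base points at the start of the timer
 * block and is used for the sched_clock/delay-timer reads and for
 * computing the per-CPU timer bases.
 */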
static u32 usec_config;
static void __iomem *timer_reg_base;

static int tegra_timer_set_next_event(unsigned long cycles,
				      struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	/*
	 * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
	 * fires after one tick if 0 is loaded.
	 *
	 * The minimum and maximum numbers of oneshot ticks are set by the
	 * clockevents_config_and_register(1, 0x1fffffff + 1) invocation
	 * below, so the requested number of cycles (ticks) is always within
	 * the range the hardware supports.
	 */
	writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);

	return 0;
}

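/*
 * Stop the timer: clearing TIMER_PTV also clears the enable and
 * periodic bits.
 */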
static int tegra_timer_shutdown(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(0, reg_base + TIMER_PTV);

	return 0;
}

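/*
 * Program a periodic tick: with TIMER_PTV_PER set, the counter
 * auto-reloads the programmed period (n+1 scheme) on expiry.
 */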
static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));
	unsigned long period = timer_of_period(to_timer_of(evt));

	writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
		       reg_base + TIMER_PTV);

	return 0;
}

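/*
 * Per-CPU timer interrupt: acknowledge it by clearing the status bit
 * in TIMER_PCR before invoking the event handler.
 */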
static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

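/* Clear any pending timer interrupt before entering suspend. */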
static void tegra_timer_suspend(struct clock_event_device *evt)
{
	void __iomem *reg_base = timer_of_base(to_timer_of(evt));

	writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}

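/*
 * The microsecond counter configuration may be lost across a deep
 * suspend state, so rewrite the cached TIMERUS_USEC_CFG value here.
 */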
static void tegra_timer_resume(struct clock_event_device *evt)
{
	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}

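/*
 * Each CPU drives its own timer channel, so every CPU gets its own
 * timer_of instance and clock event device.
 */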
static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
	.flags = TIMER_OF_CLOCK | TIMER_OF_BASE,

	.clkevt = {
		.name = "tegra_timer",
		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
		.set_next_event = tegra_timer_set_next_event,
		.set_state_shutdown = tegra_timer_shutdown,
		.set_state_periodic = tegra_timer_set_periodic,
		.set_state_oneshot = tegra_timer_shutdown,
		.tick_resume = tegra_timer_shutdown,
		.suspend = tegra_timer_suspend,
		.resume = tegra_timer_resume,
	},
};

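/*
 * CPU hotplug "starting" callback: reset this CPU's timer channel,
 * route its interrupt to the CPU coming online and register the
 * clock event device.
 */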
static int tegra_timer_setup(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

	writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
	writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);

	irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
	enable_irq(to->clkevt.irq);

	/*
	 * Tegra's timer uses an n+1 scheme for the counter, i.e. the timer
	 * fires after one tick if 0 is loaded, so the minimum number of
	 * ticks is 1. As a result, both tick limits passed here are one
	 * higher than the minimum and maximum values the hardware register
	 * can hold; the set_next_event() callback accounts for this by
	 * programming cycles - 1.
	 */
	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					1, /* min */
					0x1fffffff + 1); /* max 29 bits + 1 */

	return 0;
}

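/*
 * CPU hotplug teardown callback: mask this CPU's timer interrupt when
 * the CPU goes offline.
 */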
static int tegra_timer_stop(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);

	disable_irq_nosync(to->clkevt.irq);

	return 0;
}

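/* sched_clock() read callback, backed by the free-running 1 MHz counter. */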
static u64 notrace tegra_read_sched_clock(void)
{
	return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}

#ifdef CONFIG_ARM
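/*
 * On ARM the same 1 MHz counter also backs the delay timer, so udelay()
 * reads the hardware counter rather than using a calibrated busy loop.
 */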
static unsigned long tegra_delay_timer_read_counter_long(void)
{
	return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}

static struct delay_timer tegra_delay_timer = {
	.read_current_timer = tegra_delay_timer_read_counter_long,
	.freq = TIMER_1MHz,
};
#endif

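/*
 * timer_of wrapper around the Tegra RTC, which keeps running across
 * suspend and backs the suspend clocksource below.
 */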
static struct timer_of suspend_rtc_to = {
	.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};

/*
 * tegra_rtc_read_ms - Read the current time from the Tegra RTC registers
 *
 * Care must be taken that this function is not called while the
 * tegra_rtc driver could be executing, to avoid races on the RTC
 * shadow register.
 */
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
	void __iomem *reg_base = timer_of_base(&suspend_rtc_to);

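	/*
	 * Read milliseconds first: the RTC is expected to latch the seconds
	 * value into the shadow register on that read, so the two reads
	 * return a consistent pair.
	 */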
	u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
	u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);

	return (u64)s * MSEC_PER_SEC + ms;
}

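/*
 * Millisecond-granularity clocksource registered at 1 kHz; marked
 * SUSPEND_NONSTOP so it can account for time spent in suspend.
 */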
static struct clocksource suspend_rtc_clocksource = {
	.name	= "tegra_suspend_timer",
	.rating	= 200,
	.read	= tegra_rtc_read_ms,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

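/*
 * Map a CPU number to the MMIO offset of its timer channel: Tegra20
 * uses TIMER1-TIMER4, later SoCs use TIMER10 onwards with an 8-byte
 * stride per channel.
 */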
static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
{
	if (tegra20) {
		switch (cpu) {
		case 0:
			return TIMER1_BASE;
		case 1:
			return TIMER2_BASE;
		case 2:
			return TIMER3_BASE;
		default:
			return TIMER4_BASE;
		}
	}

	return TIMER10_BASE + cpu * 8;
}

static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
{
	if (tegra20)
		return TIMER1_IRQ_IDX + cpu;

	return TIMER10_IRQ_IDX + cpu;
}

static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
						 bool tegra20)
{
	/*
	 * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
	 * parent clock.
	 */
	if (tegra20)
		return TIMER_1MHz;

	return timer_of_rate(to);
}

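/*
 * Common initialization: map the timer block, program the microsecond
 * counter prescaler, request one timer interrupt per possible CPU and
 * register sched_clock, the clocksource, the ARM delay timer and the
 * CPU hotplug callbacks.
 */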
static int __init tegra_init_timer(struct device_node *np, bool tegra20,
				   int rating)
{
	struct timer_of *to;
	int cpu, ret;

	to = this_cpu_ptr(&tegra_to);
	ret = timer_of_init(np, to);
	if (ret)
		goto out;

	timer_reg_base = timer_of_base(to);

	/*
	 * Configure the microsecond counter (TIMERUS) to run at 1 MHz.
	 * The config register is 0xqqww, where qq is the "dividend" and
	 * ww is the "divisor"; both fields use an n+1 scheme.
	 */
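	/*
	 * For example, a 38.4 MHz oscillator uses 0x04bf:
	 * (191 + 1) / (4 + 1) = 38.4 clocks per microsecond.
	 */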
	switch (timer_of_rate(to)) {
	case 12000000:
		usec_config = 0x000b; /* (11+1)/(0+1) */
		break;
	case 12800000:
		usec_config = 0x043f; /* (63+1)/(4+1) */
		break;
	case 13000000:
		usec_config = 0x000c; /* (12+1)/(0+1) */
		break;
	case 16800000:
		usec_config = 0x0453; /* (83+1)/(4+1) */
		break;
	case 19200000:
		usec_config = 0x045f; /* (95+1)/(4+1) */
		break;
	case 26000000:
		usec_config = 0x0019; /* (25+1)/(0+1) */
		break;
	case 38400000:
		usec_config = 0x04bf; /* (191+1)/(4+1) */
		break;
	case 48000000:
		usec_config = 0x002f; /* (47+1)/(0+1) */
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);

	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
		unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
		unsigned long rate = tegra_rate_for_timer(to, tegra20);
		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
		unsigned int irq = irq_of_parse_and_map(np, idx);

		if (!irq) {
			pr_err("failed to map irq for cpu%d\n", cpu);
			ret = -EINVAL;
			goto out_irq;
		}

		cpu_to->clkevt.irq = irq;
		cpu_to->clkevt.rating = rating;
		cpu_to->clkevt.cpumask = cpumask_of(cpu);
		cpu_to->of_base.base = timer_reg_base + base;
		cpu_to->of_clk.period = rate / HZ;
		cpu_to->of_clk.rate = rate;

		irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);

		ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
				  cpu_to->clkevt.name, &cpu_to->clkevt);
		if (ret) {
			pr_err("failed to set up irq for cpu%d: %d\n",
			       cpu, ret);
			irq_dispose_mapping(cpu_to->clkevt.irq);
			cpu_to->clkevt.irq = 0;
			goto out_irq;
		}
	}

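	/*
	 * The free-running microsecond counter backs sched_clock(), the
	 * "timer_us" MMIO clocksource and, on ARM, the delay timer.
	 */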
	sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", TIMER_1MHz, 300, 32,
				    clocksource_mmio_readl_up);
	if (ret)
		pr_err("failed to register clocksource: %d\n", ret);

#ifdef CONFIG_ARM
	register_current_timer_delay(&tegra_delay_timer);
#endif

	ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
				"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
				tegra_timer_stop);
	if (ret)
		pr_err("failed to set up cpu hp state: %d\n", ret);

	return ret;

out_irq:
	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to;

		cpu_to = per_cpu_ptr(&tegra_to, cpu);
		if (cpu_to->clkevt.irq) {
			free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
			irq_dispose_mapping(cpu_to->clkevt.irq);
		}
	}

	to->of_base.base = timer_reg_base;
out:
	timer_of_cleanup(to);

	return ret;
}

static int __init tegra210_init_timer(struct device_node *np)
{
	/*
	 * The arch-timer doesn't survive a power cycle of the CPU core or
	 * the CPUPORESET signal due to a system design shortcoming, hence
	 * the tegra-timer is preferable on Tegra210.
	 */
	return tegra_init_timer(np, false, 460);
}
TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);

static int __init tegra20_init_timer(struct device_node *np)
{
	int rating;

	/*
	 * Tegra20 and Tegra30 have Cortex-A9 CPUs whose TWD timer runs off
	 * the CPU clock and is therefore subject to jitter caused by DVFS
	 * clock rate changes. The tegra-timer is preferable on these older
	 * SoCs, while later generations use the arch-timer as the main
	 * per-CPU timer, which is not affected by DVFS changes.
	 */
	if (of_machine_is_compatible("nvidia,tegra20") ||
	    of_machine_is_compatible("nvidia,tegra30"))
		rating = 460;
	else
		rating = 330;

	return tegra_init_timer(np, true, rating);
}
TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);

static int __init tegra20_init_rtc(struct device_node *np)
{
	int ret;

	ret = timer_of_init(np, &suspend_rtc_to);
	if (ret)
		return ret;

	return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);