// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2016 Freescale Semiconductor, Inc.
// Copyright 2017 NXP

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>

#include "timer-of.h"

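/*
 * Register map of the i.MX Timer/PWM Module (TPM): PARAM reports the
 * implemented counter width, SC selects the clock mode and prescaler,
 * CNT/MOD are the free-running counter and its modulo, and C0SC/C0V
 * control channel 0, which is used as the compare channel for the
 * clock event device.
 */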
#define TPM_PARAM			0x4
#define TPM_PARAM_WIDTH_SHIFT		16
#define TPM_PARAM_WIDTH_MASK		(0xff << 16)
#define TPM_SC				0x10
#define TPM_SC_CMOD_INC_PER_CNT		(0x1 << 3)
#define TPM_SC_CMOD_DIV_DEFAULT		0x3
#define TPM_SC_CMOD_DIV_MAX		0x7
#define TPM_SC_TOF_MASK			(0x1 << 7)
#define TPM_CNT				0x14
#define TPM_MOD				0x18
#define TPM_STATUS			0x1c
#define TPM_STATUS_CH0F			BIT(0)
#define TPM_C0SC			0x20
#define TPM_C0SC_CHIE			BIT(6)
#define TPM_C0SC_MODE_SHIFT		2
#define TPM_C0SC_MODE_MASK		0x3c
#define TPM_C0SC_MODE_SW_COMPARE	0x4
#define TPM_C0SC_CHF_MASK		(0x1 << 7)
#define TPM_C0V				0x24

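/*
 * Counter width (in bits) probed from TPM_PARAM and the ioremapped
 * register base; both are set once during init.
 */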
static int counter_width __ro_after_init;
static void __iomem *timer_base __ro_after_init;

static inline void tpm_timer_disable(void)
{
	unsigned int val;

	/* channel disable */
	val = readl(timer_base + TPM_C0SC);
	val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE);
	writel(val, timer_base + TPM_C0SC);
}

static inline void tpm_timer_enable(void)
{
	unsigned int val;

	/* channel enabled in sw compare mode */
	val = readl(timer_base + TPM_C0SC);
	val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) |
	       TPM_C0SC_CHIE;
	writel(val, timer_base + TPM_C0SC);
}

static inline void tpm_irq_acknowledge(void)
{
	writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}

static inline unsigned long tpm_read_counter(void)
{
	return readl(timer_base + TPM_CNT);
}

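/*
 * On 32-bit ARM the same free-running counter also backs the
 * timer-based delay loop (udelay) and sched_clock(); both callbacks
 * simply return the current TPM_CNT value.
 */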
#if defined(CONFIG_ARM)
static struct delay_timer tpm_delay_timer;

static unsigned long tpm_read_current_timer(void)
{
	return tpm_read_counter();
}

static u64 notrace tpm_read_sched_clock(void)
{
	return tpm_read_counter();
}
#endif

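/*
 * Program the next event: the channel 0 compare value (C0V) is set to
 * "current count + delta"; when CNT reaches it, CH0F is set and the
 * channel interrupt fires.
 */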
static int tpm_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	unsigned long next, prev, now;

	prev = tpm_read_counter();
	next = prev + delta;
	writel(next, timer_base + TPM_C0V);
	now = tpm_read_counter();

	/*
	 * Wait for CNT to increase by at least one cycle to make sure
	 * the new C0V value has actually reached the hardware.
	 */
	if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
		while (now == tpm_read_counter())
			;

	/*
	 * NOTE: With a very small probability, bus fabric contention
	 * between the GPU and the Cortex-A7 can delay the register write
	 * by a few cycles, which may cause a min_delta event to be
	 * missed, so add an -ETIME check here for that case.
	 */
	return (now - prev) >= delta ? -ETIME : 0;
}

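/*
 * Oneshot mode enables channel 0 in software compare mode with its
 * interrupt unmasked; shutdown masks the interrupt and disables the
 * channel again. The counter itself keeps running in both states.
 */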
static int tpm_set_state_oneshot(struct clock_event_device *evt)
{
	tpm_timer_enable();

	return 0;
}

static int tpm_set_state_shutdown(struct clock_event_device *evt)
{
	tpm_timer_disable();

	return 0;
}

static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	tpm_irq_acknowledge();

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

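/*
 * timer_of takes care of mapping the registers, requesting the IRQ and
 * getting the "per" counter clock from the device tree; the clock event
 * device is oneshot-only and its rating is adjusted at init time to
 * match the probed counter width.
 */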
static struct timer_of to_tpm = {
	.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
	.clkevt = {
		.name			= "i.MX TPM Timer",
		.rating			= 200,
		.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
		.set_state_shutdown	= tpm_set_state_shutdown,
		.set_state_oneshot	= tpm_set_state_oneshot,
		.set_next_event		= tpm_set_next_event,
		.cpumask		= cpu_possible_mask,
	},
	.of_irq = {
		.handler		= tpm_timer_interrupt,
		.flags			= IRQF_TIMER,
	},
	.of_clk = {
		.name = "per",
	},
};

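/*
 * timer_of_rate() >> 3 is the counter frequency after the default
 * divide-by-8 prescaler programmed in tpm_timer_init(); the clocksource
 * reads the free-running TPM_CNT register directly via clocksource_mmio.
 */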
static int __init tpm_clocksource_init(void)
{
#if defined(CONFIG_ARM)
	tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
	tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
	register_current_timer_delay(&tpm_delay_timer);

	sched_clock_register(tpm_read_sched_clock, counter_width,
			     timer_of_rate(&to_tpm) >> 3);
#endif

	return clocksource_mmio_init(timer_base + TPM_CNT,
				     "imx-tpm",
				     timer_of_rate(&to_tpm) >> 3,
				     to_tpm.clkevt.rating,
				     counter_width,
				     clocksource_mmio_readl_up);
}

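/*
 * Register the clock event device with a minimum delta of 300 counter
 * ticks and a maximum delta of GENMASK(counter_width - 1, 1), i.e.
 * just under the full counter range.
 */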
static void __init tpm_clockevent_init(void)
{
	clockevents_config_and_register(&to_tpm.clkevt,
					timer_of_rate(&to_tpm) >> 3,
					300,
					GENMASK(counter_width - 1, 1));
}

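/*
 * Probe: enable the "ipg" bus clock so the registers can be accessed,
 * let timer_of grab the remaining resources, read the counter width
 * from TPM_PARAM, then put the module into a known free-running state
 * before registering the clock event device and clocksource.
 */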
static int __init tpm_timer_init(struct device_node *np)
{
	struct clk *ipg;
	int ret;

	ipg = of_clk_get_by_name(np, "ipg");
	if (IS_ERR(ipg)) {
		pr_err("tpm: failed to get ipg clk\n");
		return -ENODEV;
	}
	/* enable the clock before touching any register */
	ret = clk_prepare_enable(ipg);
	if (ret) {
		pr_err("tpm: ipg clock enable failed (%d)\n", ret);
		clk_put(ipg);
		return ret;
	}

	ret = timer_of_init(np, &to_tpm);
	if (ret)
		return ret;

	timer_base = timer_of_base(&to_tpm);

	counter_width = (readl(timer_base + TPM_PARAM)
		& TPM_PARAM_WIDTH_MASK) >> TPM_PARAM_WIDTH_SHIFT;
	/* use rating 200 for a 32-bit counter and 150 for a 16-bit counter */
	to_tpm.clkevt.rating = counter_width == 0x20 ? 200 : 150;

	/*
	 * Initialize the TPM module to a known state:
	 * 1) counter disabled
	 * 2) counter operates in up-counting mode
	 * 3) timer overflow interrupt disabled
	 * 4) channel 0 disabled
	 * 5) DMA transfers disabled
	 */
	/* make sure the counter is disabled */
	writel(0, timer_base + TPM_SC);
	/* TOF is W1C */
	writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
	writel(0, timer_base + TPM_CNT);
	/* CHF is W1C */
	writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);

	/*
	 * Increment the counter on every counter clock; prescale by 8
	 * for a 32-bit counter and by 128 for a 16-bit counter.
	 */
	writel(TPM_SC_CMOD_INC_PER_CNT |
		(counter_width == 0x20 ?
		TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
		timer_base + TPM_SC);

	/* set the MOD register to its maximum for free-running mode */
	writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);

	tpm_clockevent_init();

	return tpm_clocksource_init();
}
TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
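/*
 * A minimal sketch of a matching devicetree node (the unit address,
 * interrupt specifier and clock phandles below are illustrative
 * placeholders, not copied from a real SoC dtsi):
 *
 *	tpm5: tpm@40260000 {
 *		compatible = "fsl,imx7ulp-tpm";
 *		reg = <0x40260000 0x1000>;
 *		interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&bus_clk>, <&tpm_clk>;
 *		clock-names = "ipg", "per";
 *	};
 *
 * The driver looks up the "ipg" clock by name for register access,
 * while timer_of uses the "per" clock as the counter clock.
 */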