// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Vladimir Murzin <vladimir.murzin@arm.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>

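/* Register offsets and control-register bits of a single timer instance */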
#define TIMER_CTRL		0x0
#define TIMER_CTRL_ENABLE	BIT(0)
#define TIMER_CTRL_IE		BIT(3)

#define TIMER_VALUE		0x4
#define TIMER_RELOAD		0x8
#define TIMER_INT		0xc

struct clockevent_mps2 {
	void __iomem *reg;
	u32 clock_count_per_tick;
	struct clock_event_device clkevt;
};

static void __iomem *sched_clock_base;

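/*
 * The clocksource timer is programmed as a down-counter (see
 * mps2_clocksource_init()), so invert TIMER_VALUE to give sched_clock()
 * a monotonically increasing count.
 */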
static u64 notrace mps2_sched_read(void)
{
	return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
}

static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
{
	return container_of(c, struct clockevent_mps2, clkevt);
}

static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset)
{
	writel_relaxed(val, to_mps2_clkevt(c)->reg + offset);
}

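/* Stop the timer: clear the reload value and disable the counter. */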
static int mps2_timer_shutdown(struct clock_event_device *ce)
{
	clockevent_mps2_writel(0, ce, TIMER_RELOAD);
	clockevent_mps2_writel(0, ce, TIMER_CTRL);

	return 0;
}

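/* One-shot: load the requested delta and enable the timer with its interrupt. */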
static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
{
	clockevent_mps2_writel(next, ce, TIMER_VALUE);
	clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);

	return 0;
}

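/*
 * Periodic: program both VALUE and RELOAD with the per-tick count so the
 * counter restarts from the same value after every expiry.
 */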
static int mps2_timer_set_periodic(struct clock_event_device *ce)
{
	u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;

	clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
	clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
	clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);

	return 0;
}

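/*
 * TIMER_INT reads back non-zero when this timer raised the interrupt;
 * writing 1 back clears it.
 */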
static irqreturn_t mps2_timer_interrupt(int irq, void *dev_id)
{
	struct clockevent_mps2 *ce = dev_id;
	u32 status = readl_relaxed(ce->reg + TIMER_INT);

	if (!status) {
		pr_warn("spurious interrupt\n");
		return IRQ_NONE;
	}

	writel_relaxed(1, ce->reg + TIMER_INT);

	ce->clkevt.event_handler(&ce->clkevt);

	return IRQ_HANDLED;
}

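/*
 * Set up one timer instance as a clock_event_device: take the rate from the
 * "clock-frequency" property or, failing that, from the node's first clock,
 * then map the registers, request the interrupt and register the device.
 */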
static int __init mps2_clockevent_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	struct clockevent_mps2 *ce;
	u32 rate;
	int irq, ret;
	const char *name = "mps2-clkevt";

	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clockevent: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clockevent: %d\n", ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clockevent: %d\n", ret);
		goto out_clk_disable;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		ret = -ENOENT;
		pr_err("failed to get irq for clockevent: %d\n", ret);
		goto out_iounmap;
	}

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	ce->reg = base;
	ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
	ce->clkevt.irq = irq;
	ce->clkevt.name = name;
	ce->clkevt.rating = 200;
	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->clkevt.cpumask = cpu_possible_mask;
	ce->clkevt.set_state_shutdown	= mps2_timer_shutdown;
	ce->clkevt.set_state_periodic	= mps2_timer_set_periodic;
	ce->clkevt.set_state_oneshot	= mps2_timer_shutdown;
	ce->clkevt.set_next_event	= mps2_timer_set_next_event;

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
	if (ret) {
		pr_err("failed to request irq for clockevent: %d\n", ret);
		goto out_kfree;
	}

	clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);

	return 0;

out_kfree:
	kfree(ce);
out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}

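/*
 * Set up one timer instance as a free-running clocksource and as the
 * sched_clock() source: let it count down from 0xffffffff and reload the
 * same value on wrap.
 */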
static int __init mps2_clocksource_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	u32 rate;
	int ret;
	const char *name = "mps2-clksrc";

	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clocksource: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clocksource: %d\n", ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clocksource: %d\n", ret);
		goto out_clk_disable;
	}

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	/* ... and set it up as free-running clocksource */
	writel_relaxed(0xffffffff, base + TIMER_VALUE);
	writel_relaxed(0xffffffff, base + TIMER_RELOAD);

	writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);

	ret = clocksource_mmio_init(base + TIMER_VALUE, name,
				    rate, 200, 32,
				    clocksource_mmio_readl_down);
	if (ret) {
		pr_err("failed to init clocksource: %d\n", ret);
		goto out_iounmap;
	}

	sched_clock_base = base;
	sched_clock_register(mps2_sched_read, 32, rate);

	return 0;

out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}

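/*
 * Use the first successfully probed "arm,mps2-timer" node as the clocksource
 * and the next one as the clockevent; any further nodes are ignored.
 */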
static int __init mps2_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret;

	if (!has_clocksource) {
		ret = mps2_clocksource_init(np);
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
	}

	if (!has_clockevent) {
		ret = mps2_clockevent_init(np);
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	return 0;
}

TIMER_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);