xref: /linux/drivers/mfd/stm32-timers.c (revision f9bff0e31881d03badf191d3b0005839391f5f2b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics 2016
4  * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/mfd/stm32-timers.h>
9 #include <linux/module.h>
10 #include <linux/of_platform.h>
11 #include <linux/reset.h>
12 
13 #define STM32_TIMERS_MAX_REGISTERS	0x3fc
14 
/*
 * DIER register DMA enable bits, indexed by enum stm32_timers_dmas
 * (presumed order: ch1..ch4, up, trig, com — see stm32-timers.h).
 * Used by stm32_timers_dma_burst_read() to enable/disable the DMA
 * request that matches the caller's event id.
 */
static const u32 stm32_timers_dier_dmaen[STM32_TIMERS_MAX_DMAS] = {
	TIM_DIER_CC1DE,
	TIM_DIER_CC2DE,
	TIM_DIER_CC3DE,
	TIM_DIER_CC4DE,
	TIM_DIER_UIE,	/* NOTE(review): UIE looks like the update *interrupt*
			 * enable, not a *DE DMA-request enable like its
			 * neighbours — confirm against the DIER bit layout
			 * whether an update-DMA (UDE-style) bit was intended.
			 */
	TIM_DIER_TDE,
	TIM_DIER_COMDE
};
25 
26 static void stm32_timers_dma_done(void *p)
27 {
28 	struct stm32_timers_dma *dma = p;
29 	struct dma_tx_state state;
30 	enum dma_status status;
31 
32 	status = dmaengine_tx_status(dma->chan, dma->chan->cookie, &state);
33 	if (status == DMA_COMPLETE)
34 		complete(&dma->completion);
35 }
36 
/**
 * stm32_timers_dma_burst_read - Read from timers registers using DMA.
 *
 * Read from STM32 timers registers using DMA on a single event.
 * @dev: reference to stm32_timers MFD device
 * @buf: DMA'able destination buffer
 * @id: stm32_timers_dmas event identifier (ch[1..4], up, trig or com)
 * @reg: registers start offset for DMA to read from (like CCRx for capture)
 * @num_reg: number of registers to read upon each DMA request, starting @reg.
 * @bursts: number of bursts to read (e.g. like two for pwm period capture)
 * @tmo_ms: timeout (milliseconds)
 *
 * Return: 0 on success, -EINVAL on invalid parameters, -ENODEV when the
 * requested DMA channel isn't available, -ETIMEDOUT when no DMA completion
 * arrived within @tmo_ms, or a negative error propagated from the DMA
 * engine / regmap helpers.
 */
int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
				enum stm32_timers_dmas id, u32 reg,
				unsigned int num_reg, unsigned int bursts,
				unsigned long tmo_ms)
{
	struct stm32_timers *ddata = dev_get_drvdata(dev);
	unsigned long timeout = msecs_to_jiffies(tmo_ms);
	struct regmap *regmap = ddata->regmap;
	struct stm32_timers_dma *dma = &ddata->dma;
	size_t len = num_reg * bursts * sizeof(u32);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	u32 dbl, dba;
	long err;
	int ret;

	/* Sanity check: valid event id, non-empty transfer, window in range */
	if (id < STM32_TIMERS_DMA_CH1 || id >= STM32_TIMERS_MAX_DMAS)
		return -EINVAL;

	if (!num_reg || !bursts || reg > STM32_TIMERS_MAX_REGISTERS ||
	    (reg + num_reg * sizeof(u32)) > STM32_TIMERS_MAX_REGISTERS)
		return -EINVAL;

	if (!dma->chans[id])
		return -ENODEV;
	/* Serialize burst reads: dma->chan is shared state under dma->lock */
	mutex_lock(&dma->lock);

	/* Select DMA channel in use */
	dma->chan = dma->chans[id];
	dma_buf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_buf)) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Prepare DMA read from timer registers, using DMA burst mode */
	memset(&config, 0, sizeof(config));
	config.src_addr = (dma_addr_t)dma->phys_base + TIM_DMAR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(dma->chan, &config);
	if (ret)
		goto unmap;

	desc = dmaengine_prep_slave_single(dma->chan, dma_buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto unmap;
	}

	desc->callback = stm32_timers_dma_done;
	desc->callback_param = dma;
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_term;

	reinit_completion(&dma->completion);
	dma_async_issue_pending(dma->chan);

	/* Setup and enable timer DMA burst mode */
	dbl = FIELD_PREP(TIM_DCR_DBL, bursts - 1);
	dba = FIELD_PREP(TIM_DCR_DBA, reg >> 2);
	ret = regmap_write(regmap, TIM_DCR, dbl | dba);
	if (ret)
		goto dma_term;

	/* Clear pending flags before enabling DMA request */
	ret = regmap_write(regmap, TIM_SR, 0);
	if (ret)
		goto dcr_clr;

	ret = regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id],
				 stm32_timers_dier_dmaen[id]);
	if (ret)
		goto dcr_clr;

	/* Wait for the DMA-done callback; may be interrupted by a signal */
	err = wait_for_completion_interruptible_timeout(&dma->completion,
							timeout);
	if (err == 0)
		ret = -ETIMEDOUT;
	else if (err < 0)
		ret = err;

	/* Disable the DMA request and clear status flags on the way out */
	regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id], 0);
	regmap_write(regmap, TIM_SR, 0);
dcr_clr:
	regmap_write(regmap, TIM_DCR, 0);
dma_term:
	/* Stop any in-flight transfer before unmapping the buffer */
	dmaengine_terminate_all(dma->chan);
unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
unlock:
	dma->chan = NULL;
	mutex_unlock(&dma->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stm32_timers_dma_burst_read);
151 
/* 32-bit MMIO registers, 4-byte stride, offsets up to STM32_TIMERS_MAX_REGISTERS */
static const struct regmap_config stm32_timers_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = sizeof(u32),
	.max_register = STM32_TIMERS_MAX_REGISTERS,
};
158 
159 static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
160 {
161 	u32 arr;
162 
163 	/* Backup ARR to restore it after getting the maximum value */
164 	regmap_read(ddata->regmap, TIM_ARR, &arr);
165 
166 	/*
167 	 * Only the available bits will be written so when readback
168 	 * we get the maximum value of auto reload register
169 	 */
170 	regmap_write(ddata->regmap, TIM_ARR, ~0L);
171 	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
172 	regmap_write(ddata->regmap, TIM_ARR, arr);
173 }
174 
175 static int stm32_timers_dma_probe(struct device *dev,
176 				   struct stm32_timers *ddata)
177 {
178 	int i;
179 	int ret = 0;
180 	char name[4];
181 
182 	init_completion(&ddata->dma.completion);
183 	mutex_init(&ddata->dma.lock);
184 
185 	/* Optional DMA support: get valid DMA channel(s) or NULL */
186 	for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
187 		snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
188 		ddata->dma.chans[i] = dma_request_chan(dev, name);
189 	}
190 	ddata->dma.chans[STM32_TIMERS_DMA_UP] = dma_request_chan(dev, "up");
191 	ddata->dma.chans[STM32_TIMERS_DMA_TRIG] = dma_request_chan(dev, "trig");
192 	ddata->dma.chans[STM32_TIMERS_DMA_COM] = dma_request_chan(dev, "com");
193 
194 	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++) {
195 		if (IS_ERR(ddata->dma.chans[i])) {
196 			/* Save the first error code to return */
197 			if (PTR_ERR(ddata->dma.chans[i]) != -ENODEV && !ret)
198 				ret = PTR_ERR(ddata->dma.chans[i]);
199 
200 			ddata->dma.chans[i] = NULL;
201 		}
202 	}
203 
204 	return ret;
205 }
206 
207 static void stm32_timers_dma_remove(struct device *dev,
208 				    struct stm32_timers *ddata)
209 {
210 	int i;
211 
212 	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++)
213 		if (ddata->dma.chans[i])
214 			dma_release_channel(ddata->dma.chans[i]);
215 }
216 
217 static int stm32_timers_probe(struct platform_device *pdev)
218 {
219 	struct device *dev = &pdev->dev;
220 	struct stm32_timers *ddata;
221 	struct resource *res;
222 	void __iomem *mmio;
223 	int ret;
224 
225 	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
226 	if (!ddata)
227 		return -ENOMEM;
228 
229 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
230 	mmio = devm_ioremap_resource(dev, res);
231 	if (IS_ERR(mmio))
232 		return PTR_ERR(mmio);
233 
234 	/* Timer physical addr for DMA */
235 	ddata->dma.phys_base = res->start;
236 
237 	ddata->regmap = devm_regmap_init_mmio_clk(dev, "int", mmio,
238 						  &stm32_timers_regmap_cfg);
239 	if (IS_ERR(ddata->regmap))
240 		return PTR_ERR(ddata->regmap);
241 
242 	ddata->clk = devm_clk_get(dev, NULL);
243 	if (IS_ERR(ddata->clk))
244 		return PTR_ERR(ddata->clk);
245 
246 	stm32_timers_get_arr_size(ddata);
247 
248 	ret = stm32_timers_dma_probe(dev, ddata);
249 	if (ret) {
250 		stm32_timers_dma_remove(dev, ddata);
251 		return ret;
252 	}
253 
254 	platform_set_drvdata(pdev, ddata);
255 
256 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
257 	if (ret)
258 		stm32_timers_dma_remove(dev, ddata);
259 
260 	return ret;
261 }
262 
263 static int stm32_timers_remove(struct platform_device *pdev)
264 {
265 	struct stm32_timers *ddata = platform_get_drvdata(pdev);
266 
267 	/*
268 	 * Don't use devm_ here: enfore of_platform_depopulate() happens before
269 	 * DMA are released, to avoid race on DMA.
270 	 */
271 	of_platform_depopulate(&pdev->dev);
272 	stm32_timers_dma_remove(&pdev->dev, ddata);
273 
274 	return 0;
275 }
276 
/* Device-tree match table */
static const struct of_device_id stm32_timers_of_match[] = {
	{ .compatible = "st,stm32-timers", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_timers_of_match);

static struct platform_driver stm32_timers_driver = {
	.probe = stm32_timers_probe,
	.remove = stm32_timers_remove,
	.driver	= {
		.name = "stm32-timers",
		.of_match_table = stm32_timers_of_match,
	},
};
module_platform_driver(stm32_timers_driver);

MODULE_DESCRIPTION("STMicroelectronics STM32 Timers");
MODULE_LICENSE("GPL v2");
295