// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */
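
/*
 * Illustrative device tree usage (a sketch, not taken from a real board):
 * the DMAMUX sits between client peripherals and the DMA controllers
 * listed in "dma-masters". All addresses and numbers below are
 * placeholders.
 *
 *	dmamux1: dma-router@40020800 {
 *		compatible = "st,stm32h7-dmamux";
 *		reg = <0x40020800 0x40>;
 *		#dma-cells = <3>;
 *		dma-requests = <128>;
 *		dma-masters = <&dma1>, <&dma2>;
 *	};
 *
 * A client requests a route with three cells (request line, channel
 * config, features), e.g.:
 *
 *	dmas = <&dmamux1 41 0x400 0x0>;
 *
 * stm32_dmamux_route_allocate() below turns this into the four-cell spec
 * expected by the stm32-dma master.
 */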

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255
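
/*
 * Each DMAMUX channel x is controlled through one 32-bit channel
 * configuration register at offset 4 * x: this driver writes the ID of
 * the peripheral request line to route onto that channel, and writes 0
 * to disconnect it again.
 */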

struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA channels toward the masters (DMAMUX outputs) */
	u32 dmamux_requests; /* Number of peripheral request lines (DMAMUX inputs) */
	spinlock_t lock; /* Protects register access */
	DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channels */
	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to back up the CCR
						 * registers in suspend
						 */
	u32 dma_reqs[]; /* Number of DMA requests per DMA master;
			 * [0] holds the number of DMA masters.
			 * Must be kept at the very end of this structure.
			 */
};

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}

static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear the DMA request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	pm_runtime_put_sync(dev);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}

static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %u\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Look for the DMA master owning this channel */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;
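
	/*
	 * Illustrative walk-through of the loop above: with two masters of
	 * eight requests each, dma_reqs[] is { 2, 8, 8 }. For chan_id == 10
	 * the loop exits with i == 2, min == 8 and max == 16, so the channel
	 * belongs to master 1 (the second "dma-masters" phandle) as local
	 * channel 10 - min == 2.
	 */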

	/* The of_node_put() will be done in the of_dma_router_xlate() function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set the DMA request */
	spin_lock_irqsave(&dmamux->lock, flags);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/* Craft the DMA spec handed to the master */
	dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;
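
	/*
	 * Continuing the illustrative numbers above (chan_id == 10, local
	 * channel 2 on master 1), a client spec of <&dmamux1 41 0x400 0x0>
	 * has now become <&dma2 2 0 0x400 (0x0 | 10 << 16)>: local channel,
	 * request line 0 (the routing happens here, not in the master),
	 * channel config, and features with the DMAMUX channel ID folded
	 * into the upper bits.
	 */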

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}

static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
	{ .compatible = "st,stm32-dma", },
	{},
};

static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	void __iomem *iomem;
	struct reset_control *rst;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_count_u32(&pdev->dev, "dma-masters");
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
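
	/*
	 * dma_reqs[] now describes the channel split between the masters:
	 * with, say, two masters of eight requests each (illustrative
	 * values), it holds { 2, 8, 8 } and dma_requests == 16, i.e. DMAMUX
	 * output channels 0..7 feed the first master and 8..15 the second.
	 */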

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting to %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}
	pm_runtime_get_noresume(&pdev->dev);

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable error: %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else if (count > 1) { /* Don't reset if there is only one dma-master */
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_noresume(&pdev->dev);

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	pm_runtime_put(&pdev->dev);

	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
				     &stm32_dmamux->dmarouter);
	if (ret)
		goto pm_disable;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
err_clk:
	clk_disable_unprepare(stm32_dmamux->clk);

	return ret;
}

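/*
 * The runtime PM callbacks gate the DMAMUX kernel clock. Active routes hold
 * a runtime PM reference (taken in stm32_dmamux_route_allocate() and dropped
 * in stm32_dmamux_free()), so the clock cannot be cut under a live route.
 */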
#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

	clk_disable_unprepare(stm32_dmamux->clk);

	return 0;
}

static int stm32_dmamux_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dmamux_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
							 STM32_DMAMUX_CCR(i));

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dmamux_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
				   stm32_dmamux->ccr[i]);

	pm_runtime_put_sync(dev);

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};

static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");