xref: /linux/drivers/dma/fsl-edma-main.c (revision a516c618a627e30b5613fadd264d4b4498254aeb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * drivers/dma/fsl-edma.c
4  *
5  * Copyright 2013-2014 Freescale Semiconductor, Inc.
6  * Copyright 2024 NXP
7  *
8  * Driver for the Freescale eDMA engine with flexible channel multiplexing
9  * capability for DMA request sources. The eDMA block can be found on some
10  * Vybrid, Layerscape and S32G SoCs.
11  */
12 
13 #include <dt-bindings/dma/fsl-edma.h>
14 #include <linux/bitfield.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/clk.h>
18 #include <linux/of.h>
19 #include <linux/of_dma.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/pm_domain.h>
23 #include <linux/property.h>
24 
25 #include "fsl-edma-common.h"
26 
27 static void fsl_edma_synchronize(struct dma_chan *chan)
28 {
29 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
30 
31 	vchan_synchronize(&fsl_chan->vchan);
32 }
33 
34 static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
35 {
36 	struct fsl_edma_engine *fsl_edma = dev_id;
37 	unsigned int intr, ch;
38 	struct edma_regs *regs = &fsl_edma->regs;
39 
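	/* One pending bit per channel in INTL; acknowledge each via CINT and dispatch its handler */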
40 	intr = edma_readl(fsl_edma, regs->intl);
41 	if (!intr)
42 		return IRQ_NONE;
43 
44 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
45 		if (intr & (0x1 << ch)) {
46 			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
47 			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
48 		}
49 	}
50 	return IRQ_HANDLED;
51 }
52 
53 static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
54 {
55 	unsigned int ch_err;
56 	u32 val;
57 
58 	scoped_guard(spinlock, &fsl_chan->vchan.lock) {
59 		ch_err = edma_readl_chreg(fsl_chan, ch_es);
60 		if (!(ch_err & EDMA_V3_CH_ERR))
61 			return;
62 
63 		edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
64 		val = edma_readl_chreg(fsl_chan, ch_csr);
65 		val &= ~EDMA_V3_CH_CSR_ERQ;
66 		edma_writel_chreg(fsl_chan, val, ch_csr);
67 	}
68 
69 	/* Ignore this interrupt since the channel has already been disabled */
70 	if (!fsl_chan->edesc)
71 		return;
72 
73 	if (ch_err & EDMA_V3_CH_ERR_DBE)
74 		dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
75 
76 	if (ch_err & EDMA_V3_CH_ERR_SBE)
77 		dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
78 
79 	if (ch_err & EDMA_V3_CH_ERR_SGE)
80 		dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
81 
82 	if (ch_err & EDMA_V3_CH_ERR_NCE)
83 		dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
84 
85 	if (ch_err & EDMA_V3_CH_ERR_DOE)
86 		dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
87 
88 	if (ch_err & EDMA_V3_CH_ERR_DAE)
89 		dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
90 
91 	if (ch_err & EDMA_V3_CH_ERR_SOE)
92 		dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
93 
94 	if (ch_err & EDMA_V3_CH_ERR_SAE)
95 		dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
96 
97 	if (ch_err & EDMA_V3_CH_ERR_ECX)
98 		dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
99 
100 	if (ch_err & EDMA_V3_CH_ERR_UCE)
101 		dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
102 
103 	fsl_chan->status = DMA_ERROR;
104 }
105 
106 static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
107 {
108 	struct fsl_edma_chan *fsl_chan = dev_id;
109 
110 	fsl_edma3_err_check(fsl_chan);
111 
112 	return IRQ_HANDLED;
113 }
114 
115 static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
116 {
117 	struct fsl_edma_engine *fsl_edma = dev_id;
118 	unsigned int ch;
119 
120 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
121 		if (fsl_edma->chan_masked & BIT(ch))
122 			continue;
123 
124 		fsl_edma3_err_check(&fsl_edma->chans[ch]);
125 	}
126 
127 	return IRQ_HANDLED;
128 }
129 
130 static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
131 {
132 	struct fsl_edma_chan *fsl_chan = dev_id;
133 	unsigned int intr;
134 
135 	intr = edma_readl_chreg(fsl_chan, ch_int);
136 	if (!intr)
137 		return IRQ_NONE;
138 
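	/* The channel interrupt flag is write-1-to-clear */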
139 	edma_writel_chreg(fsl_chan, 1, ch_int);
140 
141 	fsl_edma_tx_chan_handler(fsl_chan);
142 
143 	return IRQ_HANDLED;
144 }
145 
146 static irqreturn_t fsl_edma2_tx_handler(int irq, void *dev_id)
147 {
148 	struct fsl_edma_chan *fsl_chan = dev_id;
149 
150 	return fsl_edma_tx_handler(irq, fsl_chan->edma);
151 }
152 
153 static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
154 					   u8 start, u8 end)
155 {
156 	struct fsl_edma_engine *fsl_edma = dev_id;
157 	struct fsl_edma_chan *chan;
158 	int i;
159 
160 	end = min(end, fsl_edma->n_chans);
161 
162 	for (i = start; i < end; i++) {
163 		chan = &fsl_edma->chans[i];
164 
165 		fsl_edma3_tx_handler(irq, chan);
166 	}
167 
168 	return IRQ_HANDLED;
169 }
170 
171 static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
172 {
173 	return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
174 }
175 
176 static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
177 {
178 	return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
179 }
180 
181 static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
182 {
183 	struct fsl_edma_engine *fsl_edma = dev_id;
184 	struct edma_regs *regs = &fsl_edma->regs;
185 	unsigned int err, ch, ch_es;
186 	struct fsl_edma_chan *chan;
187 
188 	err = edma_readl(fsl_edma, regs->es);
189 	if (!(err & EDMA_V3_MP_ES_VLD))
190 		return IRQ_NONE;
191 
192 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
193 		chan = &fsl_edma->chans[ch];
194 
195 		ch_es = edma_readl_chreg(chan, ch_es);
196 		if (!(ch_es & EDMA_V3_CH_ES_ERR))
197 			continue;
198 
199 		edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
200 		fsl_edma_disable_request(chan);
201 		fsl_edma->chans[ch].status = DMA_ERROR;
202 	}
203 
204 	return IRQ_HANDLED;
205 }
206 
207 static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
208 {
209 	struct fsl_edma_engine *fsl_edma = dev_id;
210 	unsigned int err, ch;
211 	struct edma_regs *regs = &fsl_edma->regs;
212 
213 	err = edma_readl(fsl_edma, regs->errl);
214 	if (!err)
215 		return IRQ_NONE;
216 
217 	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
218 		if (err & (0x1 << ch)) {
219 			fsl_edma_disable_request(&fsl_edma->chans[ch]);
220 			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
221 			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
222 		}
223 	}
224 	return IRQ_HANDLED;
225 }
226 
227 static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
228 {
229 	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
230 		return IRQ_HANDLED;
231 
232 	return fsl_edma_err_handler(irq, dev_id);
233 }
234 
235 static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
236 {
237 	struct fsl_edma_chan *fsl_chan;
238 	int i;
239 
240 	for (i = 0; i < fsl_edma->n_chans; i++) {
241 		fsl_chan = &fsl_edma->chans[i];
242 
243 		if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
244 			dev_err(&fsl_chan->pdev->dev, "srcid %u is already in use\n", srcid);
245 			return true;
246 		}
247 	}
248 	return false;
249 }
250 
251 static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
252 		struct of_dma *ofdma)
253 {
254 	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
255 	struct dma_chan *chan, *_chan;
256 	struct fsl_edma_chan *fsl_chan;
257 	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
258 	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
259 
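	/*
	 * Two-cell binding: args[0] selects the DMAMUX instance and args[1] is
	 * the request source (slot) to route through that mux.
	 */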
260 	if (dma_spec->args_count != 2)
261 		return NULL;
262 
263 	guard(mutex)(&fsl_edma->fsl_edma_mutex);
264 
265 	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
266 		if (chan->client_count)
267 			continue;
268 
269 		if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1]))
270 			return NULL;
271 
272 		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
273 			chan = dma_get_slave_channel(chan);
274 			if (chan) {
275 				chan->device->privatecnt++;
276 				fsl_chan = to_fsl_edma_chan(chan);
277 				fsl_chan->srcid = dma_spec->args[1];
278 
279 				if (!fsl_chan->srcid) {
280 					dev_err(&fsl_chan->pdev->dev, "Invalid srcid %d\n",
281 						fsl_chan->srcid);
282 					return NULL;
283 				}
284 
285 				fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid,
286 						true);
287 				return chan;
288 			}
289 		}
290 	}
291 	return NULL;
292 }
293 
294 static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
295 					struct of_dma *ofdma)
296 {
297 	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
298 	struct dma_chan *chan, *_chan;
299 	struct fsl_edma_chan *fsl_chan;
300 	bool b_chmux;
301 	int i;
302 
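	/*
	 * eDMA3/eDMA4 three-cell binding: args[0] is the request source ID,
	 * args[1] the channel priority, and args[2] a bitmask of FSL_EDMA_*
	 * flags (RX, REMOTE, MULTI_FIFO, EVEN_CH/ODD_CH channel constraints).
	 */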
303 	if (dma_spec->args_count != 3)
304 		return NULL;
305 
306 	b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
307 
308 	guard(mutex)(&fsl_edma->fsl_edma_mutex);
309 	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
310 					device_node) {
311 
312 		if (chan->client_count)
313 			continue;
314 
315 		fsl_chan = to_fsl_edma_chan(chan);
316 		if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0]))
317 			return NULL;
318 		i = fsl_chan - fsl_edma->chans;
319 
320 		if (!b_chmux && i != dma_spec->args[0])
321 			continue;
322 
323 		if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
324 			continue;
325 
326 		if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
327 			continue;
328 
329 		fsl_chan->srcid = dma_spec->args[0];
330 		fsl_chan->priority = dma_spec->args[1];
331 		fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
332 		fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
333 		fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
334 
335 		chan = dma_get_slave_channel(chan);
336 		chan->device->privatecnt++;
337 		return chan;
338 	}
339 	return NULL;
340 }
341 
342 static int
343 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
344 {
345 	int ret;
346 
347 	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
348 
349 	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
350 	if (fsl_edma->txirq < 0)
351 		return fsl_edma->txirq;
352 
353 	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
354 	if (fsl_edma->errirq < 0)
355 		return fsl_edma->errirq;
356 
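	/*
	 * Some SoCs route the transfer-complete and error interrupts to the
	 * same line; in that case register one combined handler for both.
	 */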
357 	if (fsl_edma->txirq == fsl_edma->errirq) {
358 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
359 				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
360 		if (ret) {
361 			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
362 			return ret;
363 		}
364 	} else {
365 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
366 				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
367 		if (ret) {
368 			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
369 			return ret;
370 		}
371 
372 		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
373 				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
374 		if (ret) {
375 			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
376 			return ret;
377 		}
378 	}
379 
380 	return 0;
381 }
382 
383 static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
384 {
385 	char *errirq_name;
386 	int i, ret;
387 
388 	for (i = 0; i < fsl_edma->n_chans; i++) {
389 
390 		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
391 
392 		if (fsl_edma->chan_masked & BIT(i))
393 			continue;
394 
395 		/* request channel irq */
396 		fsl_chan->txirq = platform_get_irq(pdev, i);
397 		if (fsl_chan->txirq < 0)
398 			return  -EINVAL;
399 
400 		fsl_chan->irq_handler = fsl_edma3_tx_handler;
401 
402 		if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
403 			fsl_chan->errirq = fsl_chan->txirq;
404 			fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
405 		}
406 	}
407 
408 	/* All channels' error interrupts share one IRQ line */
409 	if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
410 		/* the entry after the last channel is the error IRQ */
411 		fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
412 		if (fsl_edma->errirq < 0)
413 			return 0; /* DT omits the error IRQ; treat as the no-error-IRQ case */
414 
415 		errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
416 					     dev_name(&pdev->dev));
417 
418 		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
419 				       0, errirq_name, fsl_edma);
420 		if (ret)
421 			return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
422 	}
423 
424 	return 0;
425 }
426 
427 static int fsl_edma3_or_irq_init(struct platform_device *pdev,
428 				 struct fsl_edma_engine *fsl_edma)
429 {
430 	int ret;
431 
432 	fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
433 	if (fsl_edma->txirq < 0)
434 		return fsl_edma->txirq;
435 
436 	fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
437 	if (fsl_edma->txirq_16_31 < 0)
438 		return fsl_edma->txirq_16_31;
439 
440 	fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
441 	if (fsl_edma->errirq < 0)
442 		return fsl_edma->errirq;
443 
444 	ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
445 			       fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
446 			       fsl_edma);
447 	if (ret)
448 		return dev_err_probe(&pdev->dev, ret,
449 			       "Can't register eDMA tx0_15 IRQ.\n");
450 
451 	if (fsl_edma->n_chans > 16) {
452 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
453 				       fsl_edma3_tx_16_31_handler, 0,
454 				       "eDMA tx16_31", fsl_edma);
455 		if (ret)
456 			return dev_err_probe(&pdev->dev, ret,
457 					"Can't register eDMA tx16_31 IRQ.\n");
458 	}
459 
460 	ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
461 			       fsl_edma3_or_err_handler, 0, "eDMA err",
462 			       fsl_edma);
463 	if (ret)
464 		return dev_err_probe(&pdev->dev, ret,
465 				     "Can't register eDMA err IRQ.\n");
466 
467 	return 0;
468 }
469 
470 static int
471 fsl_edma2_irq_init(struct platform_device *pdev,
472 		   struct fsl_edma_engine *fsl_edma)
473 {
474 	int i, ret, irq;
475 	int count;
476 
477 	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
478 
479 	count = platform_irq_count(pdev);
480 	dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
481 	if (count <= 2) {
482 		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
483 		return -EINVAL;
484 	}
485 	/*
486 	 * i.MX7ULP has 16 independent channel interrupts plus one error interrupt.
487 	 * Two channels share each interrupt line, for example ch0/ch16, ch1/ch17...
488 	 * For now, simply request the IRQs without the IRQF_SHARED flag, since 16
489 	 * channels are enough on i.MX7ULP, whose M4 domain owns some peripherals.
490 	 */
491 	for (i = 0; i < count; i++) {
492 		irq = platform_get_irq(pdev, i);
493 		ret = 0;
494 		if (irq < 0)
495 			return -ENXIO;
496 
497 		/* The last IRQ is for eDMA err */
498 		if (i == count - 1) {
499 			fsl_edma->errirq = irq;
500 			ret = devm_request_irq(&pdev->dev, irq,
501 						fsl_edma_err_handler,
502 						0, "eDMA2-ERR", fsl_edma);
503 		} else {
504 			fsl_edma->chans[i].txirq = irq;
505 			fsl_edma->chans[i].irq_handler = fsl_edma2_tx_handler;
506 		}
507 
508 		if (ret)
509 			return ret;
510 	}
511 
512 	return 0;
513 }
514 
515 static void fsl_edma_irq_exit(
516 		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
517 {
518 	if (fsl_edma->txirq == fsl_edma->errirq) {
519 		if (fsl_edma->txirq >= 0)
520 			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
521 	} else {
522 		if (fsl_edma->txirq >= 0)
523 			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
524 		if (fsl_edma->errirq >= 0)
525 			devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
526 	}
527 }
528 
529 static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
530 {
531 	int i;
532 
533 	for (i = 0; i < nr_clocks; i++)
534 		clk_disable_unprepare(fsl_edma->muxclk[i]);
535 }
536 
537 static struct fsl_edma_drvdata vf610_data = {
538 	.dmamuxs = DMAMUX_NR,
539 	.flags = FSL_EDMA_DRV_WRAP_IO,
540 	.chreg_off = EDMA_TCD,
541 	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
542 	.setup_irq = fsl_edma_irq_init,
543 };
544 
545 static struct fsl_edma_drvdata ls1028a_data = {
546 	.dmamuxs = DMAMUX_NR,
547 	.flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
548 	.chreg_off = EDMA_TCD,
549 	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
550 	.setup_irq = fsl_edma_irq_init,
551 };
552 
553 static struct fsl_edma_drvdata imx7ulp_data = {
554 	.dmamuxs = 1,
555 	.chreg_off = EDMA_TCD,
556 	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
557 	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
558 	.setup_irq = fsl_edma2_irq_init,
559 };
560 
561 static struct fsl_edma_drvdata imx8qm_data = {
562 	.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
563 		 | FSL_EDMA_DRV_ERRIRQ_SHARE,
564 	.chreg_space_sz = 0x10000,
565 	.chreg_off = 0x10000,
566 	.setup_irq = fsl_edma3_irq_init,
567 };
568 
569 static struct fsl_edma_drvdata imx8ulp_data = {
570 	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
571 		 FSL_EDMA_DRV_EDMA3,
572 	.chreg_space_sz = 0x10000,
573 	.chreg_off = 0x10000,
574 	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
575 	.mux_skip = 0x10000,
576 	.setup_irq = fsl_edma3_irq_init,
577 };
578 
579 static struct fsl_edma_drvdata imx93_data3 = {
580 	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
581 	.chreg_space_sz = 0x10000,
582 	.chreg_off = 0x10000,
583 	.setup_irq = fsl_edma3_irq_init,
584 };
585 
586 static struct fsl_edma_drvdata imx93_data4 = {
587 	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
588 		 | FSL_EDMA_DRV_ERRIRQ_SHARE,
589 	.chreg_space_sz = 0x8000,
590 	.chreg_off = 0x10000,
591 	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
592 	.mux_skip = 0x8000,
593 	.setup_irq = fsl_edma3_irq_init,
594 };
595 
596 static struct fsl_edma_drvdata imx95_data5 = {
597 	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
598 		 FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
599 	.chreg_space_sz = 0x8000,
600 	.chreg_off = 0x10000,
601 	.mux_off = 0x200,
602 	.mux_skip = sizeof(u32),
603 	.setup_irq = fsl_edma3_irq_init,
604 };
605 
606 static const struct fsl_edma_drvdata s32g2_data = {
607 	.dmamuxs = DMAMUX_NR,
608 	.chreg_space_sz = EDMA_TCD,
609 	.chreg_off = 0x4000,
610 	.flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
611 	.setup_irq = fsl_edma3_or_irq_init,
612 };
613 
614 static const struct of_device_id fsl_edma_dt_ids[] = {
615 	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
616 	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
617 	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
618 	{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
619 	{ .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
620 	{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
621 	{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
622 	{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
623 	{ .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
624 	{ /* sentinel */ }
625 };
626 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
627 
628 static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
629 {
630 	struct fsl_edma_chan *fsl_chan;
631 	int i;
632 
633 	for (i = 0; i < fsl_edma->n_chans; i++) {
634 		if (fsl_edma->chan_masked & BIT(i))
635 			continue;
636 		fsl_chan = &fsl_edma->chans[i];
637 		if (fsl_chan->pd_dev_link)
638 			device_link_del(fsl_chan->pd_dev_link);
639 		if (fsl_chan->pd_dev) {
640 			dev_pm_domain_detach(fsl_chan->pd_dev, false);
641 			pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
642 			pm_runtime_set_suspended(fsl_chan->pd_dev);
643 		}
644 	}
645 }
646 
647 static void devm_fsl_edma3_detach_pd(void *data)
648 {
649 	fsl_edma3_detach_pd(data);
650 }
651 
652 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
653 {
654 	struct fsl_edma_chan *fsl_chan;
655 	struct device *pd_chan;
656 	struct device *dev;
657 	int i;
658 
659 	dev = &pdev->dev;
660 
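	/*
	 * Each unmasked channel sits in its own power domain; attach them all
	 * and keep them runtime-PM managed through stateless device links.
	 */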
661 	for (i = 0; i < fsl_edma->n_chans; i++) {
662 		if (fsl_edma->chan_masked & BIT(i))
663 			continue;
664 
665 		fsl_chan = &fsl_edma->chans[i];
666 
667 		pd_chan = dev_pm_domain_attach_by_id(dev, i);
668 		if (IS_ERR_OR_NULL(pd_chan)) {
669 			dev_err(dev, "Failed to attach pd %d\n", i);
670 			goto detach;
671 		}
672 
673 		fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
674 					     DL_FLAG_PM_RUNTIME |
675 					     DL_FLAG_RPM_ACTIVE);
676 		if (!fsl_chan->pd_dev_link) {
677 			dev_err(dev, "Failed to add device_link to %d\n", i);
678 			dev_pm_domain_detach(pd_chan, false);
679 			goto detach;
680 		}
681 
682 		fsl_chan->pd_dev = pd_chan;
683 
684 		pm_runtime_use_autosuspend(fsl_chan->pd_dev);
685 		pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
686 		pm_runtime_set_active(fsl_chan->pd_dev);
687 	}
688 
689 	return 0;
690 
691 detach:
692 	fsl_edma3_detach_pd(fsl_edma);
693 	return -EINVAL;
694 }
695 
696 static int fsl_edma_probe(struct platform_device *pdev)
697 {
698 	struct device_node *np = pdev->dev.of_node;
699 	struct fsl_edma_engine *fsl_edma;
700 	const struct fsl_edma_drvdata *drvdata = NULL;
701 	u32 chan_mask[2] = {0, 0};
702 	char clk_name[36];
703 	struct edma_regs *regs;
704 	int chans;
705 	int ret, i;
706 
707 	drvdata = device_get_match_data(&pdev->dev);
708 	if (!drvdata) {
709 		dev_err(&pdev->dev, "unable to find driver data\n");
710 		return -EINVAL;
711 	}
712 
713 	ret = of_property_read_u32(np, "dma-channels", &chans);
714 	if (ret) {
715 		dev_err(&pdev->dev, "Can't get dma-channels.\n");
716 		return ret;
717 	}
718 
719 	fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
720 				GFP_KERNEL);
721 	if (!fsl_edma)
722 		return -ENOMEM;
723 
724 	fsl_edma->errirq = -EINVAL;
725 	fsl_edma->txirq = -EINVAL;
726 	fsl_edma->drvdata = drvdata;
727 	fsl_edma->n_chans = chans;
728 	mutex_init(&fsl_edma->fsl_edma_mutex);
729 
730 	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
731 	if (IS_ERR(fsl_edma->membase))
732 		return PTR_ERR(fsl_edma->membase);
733 
734 	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
735 		fsl_edma_setup_regs(fsl_edma);
736 		regs = &fsl_edma->regs;
737 	}
738 
739 	if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
740 		fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
741 		if (IS_ERR(fsl_edma->dmaclk)) {
742 			dev_err(&pdev->dev, "Missing DMA block clock.\n");
743 			return PTR_ERR(fsl_edma->dmaclk);
744 		}
745 	}
746 
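	/*
	 * "dma-channel-mask" may hold one or two u32 cells; fold them into a
	 * 64-bit chan_masked value. Channels whose bit is set are skipped.
	 */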
747 	ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
748 
749 	if (ret > 0) {
750 		fsl_edma->chan_masked = chan_mask[1];
751 		fsl_edma->chan_masked <<= 32;
752 		fsl_edma->chan_masked |= chan_mask[0];
753 	}
754 
755 	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
756 		char clkname[32];
757 
758 		fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
759 								      1 + i);
760 		if (IS_ERR(fsl_edma->muxbase[i])) {
761 			/* on error: disable all previously enabled clks */
762 			fsl_disable_clocks(fsl_edma, i);
763 			return PTR_ERR(fsl_edma->muxbase[i]);
764 		}
765 
766 		sprintf(clkname, "dmamux%d", i);
767 		fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
768 		if (IS_ERR(fsl_edma->muxclk[i])) {
769 			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
770 			/* on error: disable all previously enabled clks */
771 			return PTR_ERR(fsl_edma->muxclk[i]);
772 		}
773 	}
774 
775 	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
776 
777 	if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
778 		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
779 		if (ret)
780 			return ret;
781 		ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
782 		if (ret)
783 			return ret;
784 	}
785 
786 	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
787 		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
788 
789 	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
790 	for (i = 0; i < fsl_edma->n_chans; i++) {
791 		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
792 		int len;
793 
794 		if (fsl_edma->chan_masked & BIT(i))
795 			continue;
796 
797 		snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
798 							   dev_name(&pdev->dev), i);
799 
800 		snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
801 			 "%s-CH%02d-err", dev_name(&pdev->dev), i);
802 
803 		fsl_chan->edma = fsl_edma;
804 		fsl_chan->pm_state = RUNNING;
805 		fsl_chan->srcid = 0;
806 		fsl_chan->dma_dir = DMA_NONE;
807 		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
808 
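		/*
		 * Per-channel registers live at membase + chreg_off +
		 * i * chreg_space_sz; on split-register (eDMA3/4) blocks the
		 * TCD sits behind the channel CSRs, hence the extra offset.
		 */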
809 		len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
810 				offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
811 		fsl_chan->tcd = fsl_edma->membase
812 				+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
813 		fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
814 
815 		if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
816 			snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
817 			fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
818 							     (const char *)clk_name);
819 
820 			if (IS_ERR(fsl_chan->clk))
821 				return PTR_ERR(fsl_chan->clk);
822 		}
823 		fsl_chan->pdev = pdev;
824 		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
825 
826 		edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
827 		fsl_edma_chan_mux(fsl_chan, 0, false);
828 		if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
829 			clk_disable_unprepare(fsl_chan->clk);
830 	}
831 
832 	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
833 	if (ret)
834 		return ret;
835 
836 	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
837 	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
838 	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
839 	dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);
840 
841 	fsl_edma->dma_dev.dev = &pdev->dev;
842 	fsl_edma->dma_dev.device_alloc_chan_resources
843 		= fsl_edma_alloc_chan_resources;
844 	fsl_edma->dma_dev.device_free_chan_resources
845 		= fsl_edma_free_chan_resources;
846 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
847 	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
848 	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
849 	fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
850 	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
851 	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
852 	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
853 	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
854 	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
855 	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
856 
857 	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
858 	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
859 
860 	if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
861 		fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
862 		fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
863 	}
864 
865 	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
866 	if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
867 		fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
868 
869 	fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
870 					DMAENGINE_ALIGN_64_BYTES :
871 					DMAENGINE_ALIGN_32_BYTES;
872 
873 	/* In the worst case ('nbytes = 1'), CITER bounds the transfer length, so take it as max_seg_size */
874 	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
875 			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
876 
877 	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
878 
879 	platform_set_drvdata(pdev, fsl_edma);
880 
881 	ret = dma_async_device_register(&fsl_edma->dma_dev);
882 	if (ret) {
883 		dev_err(&pdev->dev,
884 			"Can't register Freescale eDMA engine. (%d)\n", ret);
885 		return ret;
886 	}
887 
888 	ret = of_dma_controller_register(np,
889 			drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
890 			fsl_edma);
891 	if (ret) {
892 		dev_err(&pdev->dev,
893 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
894 		dma_async_device_unregister(&fsl_edma->dma_dev);
895 		return ret;
896 	}
897 
898 	/* enable round robin arbitration */
899 	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
900 		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
901 
902 	return 0;
903 }
904 
905 static void fsl_edma_remove(struct platform_device *pdev)
906 {
907 	struct device_node *np = pdev->dev.of_node;
908 	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
909 
910 	fsl_edma_irq_exit(pdev, fsl_edma);
911 	of_dma_controller_free(np);
912 	dma_async_device_unregister(&fsl_edma->dma_dev);
913 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
914 }
915 
916 static int fsl_edma_suspend_late(struct device *dev)
917 {
918 	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
919 	struct fsl_edma_chan *fsl_chan;
920 	unsigned long flags;
921 	int i;
922 
923 	for (i = 0; i < fsl_edma->n_chans; i++) {
924 		fsl_chan = &fsl_edma->chans[i];
925 		if (fsl_edma->chan_masked & BIT(i))
926 			continue;
927 		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
928 		/* Make sure the channel is idle; otherwise force-disable it. */
929 		if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
930 			dev_warn(dev, "WARN: There is a non-idle channel.\n");
931 			fsl_edma_disable_request(fsl_chan);
932 			fsl_edma_chan_mux(fsl_chan, 0, false);
933 		}
934 
935 		fsl_chan->pm_state = SUSPENDED;
936 		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
937 	}
938 
939 	return 0;
940 }
941 
942 static int fsl_edma_resume_early(struct device *dev)
943 {
944 	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
945 	struct fsl_edma_chan *fsl_chan;
946 	struct edma_regs *regs = &fsl_edma->regs;
947 	int i;
948 
949 	for (i = 0; i < fsl_edma->n_chans; i++) {
950 		fsl_chan = &fsl_edma->chans[i];
951 		if (fsl_edma->chan_masked & BIT(i))
952 			continue;
953 		fsl_chan->pm_state = RUNNING;
954 		edma_write_tcdreg(fsl_chan, 0, csr);
955 		if (fsl_chan->srcid != 0)
956 			fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
957 	}
958 
959 	if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
960 		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
961 
962 	return 0;
963 }
964 
965 /*
966  * The eDMA provides a service to other devices, so it should be suspended
967  * late and resumed early. When the eDMA suspends, all of its clients should
968  * have stopped their DMA transfers and left the channels idle.
969  */
970 static const struct dev_pm_ops fsl_edma_pm_ops = {
971 	.suspend_late   = fsl_edma_suspend_late,
972 	.resume_early   = fsl_edma_resume_early,
973 };
974 
975 static struct platform_driver fsl_edma_driver = {
976 	.driver		= {
977 		.name	= "fsl-edma",
978 		.of_match_table = fsl_edma_dt_ids,
979 		.pm     = &fsl_edma_pm_ops,
980 	},
981 	.probe          = fsl_edma_probe,
982 	.remove		= fsl_edma_remove,
983 };
984 
985 static int __init fsl_edma_init(void)
986 {
987 	return platform_driver_register(&fsl_edma_driver);
988 }
989 subsys_initcall(fsl_edma_init);
990 
991 static void __exit fsl_edma_exit(void)
992 {
993 	platform_driver_unregister(&fsl_edma_driver);
994 }
995 module_exit(fsl_edma_exit);
996 
997 MODULE_DESCRIPTION("Freescale eDMA engine driver");
998 MODULE_LICENSE("GPL v2");
999