xref: /linux/drivers/dma/mcf-edma-main.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))

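/*
 * Transfer-complete interrupt handler: build a 64-bit map of pending
 * channels from the INTH/INTL registers, acknowledge each pending channel
 * through the CINT register and hand it to the common completion handler.
 */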
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);
			fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
		}
	}

	return IRQ_HANDLED;
}

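/*
 * Error interrupt handler: scan the ERRL/ERRH error status registers and,
 * for every channel flagging an error, disable its hardware request, clear
 * the error through the CERR register and mark the transfer as failed.
 */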
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
		}
	}

	return IRQ_HANDLED;
}

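/*
 * Request the ColdFire interrupt lines: two IRQ ranges described by the
 * "edma-tx-00-15" and "edma-tx-16-55" platform resources, plus the single
 * "edma-tx-56-63" and "edma-err" lines, which are only requested when the
 * platform provides them.
 */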
static int mcf_edma_irq_init(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
				IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}

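/* Release every interrupt line acquired by mcf_edma_irq_init(). */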
static void mcf_edma_irq_free(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}

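/*
 * ColdFire uses the 64-channel eDMA register layout; the interrupt lines
 * are wired up by mcf_edma_irq_init() above.
 */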
static struct fsl_edma_drvdata mcf_data = {
	.flags = FSL_EDMA_DRV_EDMA64,
	.setup_irq = mcf_edma_irq_init,
};

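/*
 * Probe: size the engine from the platform data (defaulting to 64
 * channels), map the controller registers, set up each channel's virt-dma
 * context and TCD pointer, install the interrupt handlers and register the
 * dmaengine device.
 */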
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct edma_regs *regs;
	int ret, i, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	if (!pdata->dma_channels) {
		dev_info(&pdev->dev, "setting default channel number to 64");
		chans = 64;
	} else {
		chans = pdata->dma_channels;
	}

	mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
				GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	mutex_init(&mcf_edma->fsl_edma_mutex);

	mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->srcid = i;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
				+ i * sizeof(struct fsl_edma_hw_tcd);
		edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr);
	}

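	/* The INT bits are write-1-to-clear; discard any stale channel interrupts. */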
	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

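	/*
	 * ColdFire has no DT/ACPI description, so dma_request_chan() falls
	 * back to the board-supplied slave map together with the channel
	 * filter installed below.
	 */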
	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

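/* Undo probe: release the IRQs, clean up the virt-dma channels and unregister. */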
static void mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);
}

static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove_new	= mcf_edma_remove,
};

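/*
 * mcf_edma_filter_fn - channel filter for ColdFire peripheral drivers
 * @chan: candidate DMA channel
 * @param: requested eDMA source id, passed as a pointer-sized integer
 *
 * Returns true when @chan belongs to this driver and its source id matches
 * the requested one.
 */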
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->srcid == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);

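/*
 * Illustrative use from a ColdFire peripheral driver (a sketch, not taken
 * from an in-tree user; MY_DMAREQ_SOURCE stands in for the peripheral's
 * eDMA request source number):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mcf_edma_filter_fn,
 *				   (void *)(uintptr_t)MY_DMAREQ_SOURCE);
 *	if (!chan)
 *		return -EBUSY;
 */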
static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");