// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

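/*
 * Per-channel transfer-complete handler, called from the controller's
 * interrupt handler: completes the active descriptor (or runs the cyclic
 * callback) and starts the next queued one.
 */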
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}

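/*
 * eDMA v3/v4 (FSL_EDMA_DRV_SPLIT_REG) parts have per-channel register
 * blocks: set the bus read/write attribution in CH_SBR, program the
 * channel mux if present, then set CH_CSR[ERQ] to enable the request.
 */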
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	if (fsl_chan->is_rxchan)
		val |= EDMA_V3_CH_SBR_RD;
	else
		val |= EDMA_V3_CH_SBR_WR;

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: With the exception of 0, attempts to write a value
		 * already in use will be forced to 0.
		 */
		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals.
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals.
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

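/*
 * Route the request source "slot" to this channel through the DMAMUX.
 * Channels are divided evenly across the available muxes; on
 * FSL_EDMA_DRV_MUX_SWAP parts the 8-bit CHCFG registers sit in
 * byte-swapped order, which the endian_diff[] adjustment compensates for.
 */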
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}

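/*
 * Encode the bus width into the TCD ATTR field: DSIZE in bits 2..0 and
 * SSIZE in bits 10..8, both as log2 of the access size.  For example, a
 * 4-byte width gives ffs(4) - 1 = 2, so the function returns 0x0202
 * (32-bit source and destination accesses).
 */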
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->status = DMA_COMPLETE;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

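/*
 * Map the device FIFO with dma_map_resource() so the eDMA engine can
 * address it.  The mapping is cached in the channel and reused until the
 * transfer direction (and thus the mapped address) changes.
 */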
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}

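/*
 * Residue = total bytes described by the descriptor minus the bytes
 * already transferred: sum nbytes (minor loop) * biter (major loop count)
 * over all TCDs, then, for an in-progress transfer, use the hardware
 * source/destination address to locate the TCD currently being processed
 * and how far it has got within it.
 */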
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr, old_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* calculate the total size of this descriptor */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
	}

	if (!in_progress)
		return len;

	/* a 64-bit read is not atomic: retry while the high 32 bits change */
	do {
		if (dir == DMA_MEM_TO_DEV) {
			old_addr = edma_read_tcdreg(fsl_chan, saddr);
			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
		} else {
			old_addr = edma_read_tcdreg(fsl_chan, daddr);
			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
		}
	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));

	/* find the TCD in progress and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
		else
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format.  However, the TCD registers must be loaded in
	 * big or little endian according to the eDMA engine's endian
	 * model, which the specific edma_write functions take care of.
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);

	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);

	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
	}

	/*
	 * On eDMAv3, CHn_CSR[DONE] must be cleared before enabling
	 * TCDn_CSR[ESG]; eDMAv4 has no such requirement.  Changing MLINK
	 * requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
		(csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
		(csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}

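/*
 * Populate one hardware TCD: source/destination addresses and offsets,
 * minor/major loop counts, and the CSR flags that control chaining
 * (major-loop interrupt, disable-request-on-completion, scatter/gather
 * enable).
 */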
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u16 csr = 0;
	u32 burst;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, and leave
	 * fsl_edma_set_tcd_regs to do the swap.
	 */
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);

	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple fifos */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
				cfg->src_maxburst : cfg->dst_maxburst;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		} else {
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);

	trace_edma_fill_tcd(fsl_chan, tcd);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

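/*
 * Cyclic transfers build a ring of TCDs, one per period: each TCD's
 * dlast_sga points at the next TCD's physical address and the last one
 * wraps back to the first, so the transfer loops until terminated.
 */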
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * If sg_dma_len is not a multiple of the configured burst
		 * length, pick the largest burst that divides it evenly so
		 * the whole transfer length is a multiple of the minor loop
		 * (burst length).
		 */
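		/*
		 * For example, with a 4-byte width, a maxburst of 8
		 * (nbytes = 32) and sg_dma_len(sg) = 24, the loop below
		 * settles on j = 6, i.e. nbytes = 24, so the minor loop
		 * evenly divides the segment.
		 */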
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set burst size as 1 if there's no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
		fsl_chan->is_remote = true;

	/* This matches copy_align and max_seg_size, so one TCD is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

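/*
 * Per-channel setup: create a 32-byte aligned dma_pool for the hardware
 * TCDs (TCD64-sized on FSL_EDMA_DRV_TCD64 parts) and request the
 * channel's transfer-complete interrupt.
 */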
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	int ret;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_prepare_enable(fsl_chan->clk);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
				32, 0);

	if (fsl_chan->txirq) {
		ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
				  fsl_chan->chan_name, fsl_chan);

		if (ret) {
			dma_pool_destroy(fsl_chan->tcd_pool);
			return ret;
		}
	}

	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	if (fsl_chan->txirq)
		free_irq(fsl_chan->txirq, fsl_chan);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
	fsl_chan->srcid = 0;
	fsl_chan->is_remote = false;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_disable_unprepare(fsl_chan->clk);
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets for the declared version,
 * so it must be called from xxx_edma_probe() just after the edma
 * "version" and "membase" fields have been set.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");