Lines matching +full:edma +full:-err in drivers/dma/fsl-edma-common.c
1 // SPDX-License-Identifier: GPL-2.0+
3 // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
11 #include <linux/dma-mapping.h>
15 #include "fsl-edma-common.h"
49 spin_lock(&fsl_chan->vchan.lock);
51 if (!fsl_chan->edesc) {
53 spin_unlock(&fsl_chan->vchan.lock);
57 if (!fsl_chan->edesc->iscyclic) {
58 list_del(&fsl_chan->edesc->vdesc.node);
59 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
60 fsl_chan->edesc = NULL;
61 fsl_chan->status = DMA_COMPLETE;
63 vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
66 if (!fsl_chan->edesc)
69 spin_unlock(&fsl_chan->vchan.lock);
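
/*
 * A compact sketch of the completion flow matched above (lines 49-69),
 * with the elided control flow restored for readability. It assumes the
 * driver's struct fsl_edma_chan and the kernel's virt-dma helpers; the
 * function name is illustrative, not the driver's.
 */
static void fsl_edma_complete_sketch(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all ran before this handler: nothing to do */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		/* one-shot transfer: retire the descriptor */
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
	} else {
		/* cyclic transfer: leave it installed, notify the client */
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	/* if nothing is in flight, start the next queued descriptor */
	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}
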
78 if (fsl_chan->is_rxchan)
83 if (fsl_chan->is_remote)
93 if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
94 edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
104 struct edma_regs *regs = &fsl_chan->edma->regs;
105 u32 ch = fsl_chan->vchan.chan.chan_id;
110 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
111 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
112 edma_writeb(fsl_chan->edma, ch, regs->serq);
117 iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
118 iowrite8(ch, regs->serq);
130 edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
138 struct edma_regs *regs = &fsl_chan->edma->regs;
139 u32 ch = fsl_chan->vchan.chan.chan_id;
144 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
145 edma_writeb(fsl_chan->edma, ch, regs->cerq);
146 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
151 iowrite8(ch, regs->cerq);
152 iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
185 u32 ch = fsl_chan->vchan.chan.chan_id;
188 int endian_diff[4] = {3, 1, -1, -3};
189 u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
194 chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
195 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
197 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
200 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
203 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
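
/*
 * Worked example of the MUX_SWAP fixup at lines 188/197: on parts whose
 * DMAMUX byte registers sit in big-endian 32-bit words, channel offsets
 * 0,1,2,3 must be remapped to byte lanes 3,2,1,0. Standalone C, numbers
 * purely illustrative.
 */
#include <stdio.h>

int main(void)
{
	int endian_diff[4] = {3, 1, -1, -3};

	for (int ch_off = 0; ch_off < 8; ch_off++)
		printf("ch_off %d -> lane %d\n",
		       ch_off, ch_off + endian_diff[ch_off % 4]);
	return 0;	/* 0->3, 1->2, 2->1, 3->0, 4->7, 5->6, 6->5, 7->4 */
}
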
216 val = ffs(addr_width) - 1;
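
/*
 * The attribute computed at line 216 encodes the port width as
 * log2(bytes), which ffs() yields directly for power-of-two widths.
 * A standalone check (userspace ffs() from <strings.h>):
 */
#include <assert.h>
#include <strings.h>

int main(void)
{
	assert(ffs(1) - 1 == 0);	/* 1-byte width -> size code 0 */
	assert(ffs(2) - 1 == 1);	/* 2-byte width -> size code 1 */
	assert(ffs(4) - 1 == 2);	/* 4-byte width -> size code 2 */
	assert(ffs(8) - 1 == 3);	/* 8-byte width -> size code 3 */
	return 0;
}
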
226 for (i = 0; i < fsl_desc->n_tcds; i++)
227 dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
228 fsl_desc->tcd[i].ptcd);
238 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
240 fsl_chan->edesc = NULL;
241 fsl_chan->status = DMA_COMPLETE;
242 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
243 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
244 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
247 pm_runtime_allow(fsl_chan->pd_dev);
257 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
258 if (fsl_chan->edesc) {
260 fsl_chan->status = DMA_PAUSED;
262 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
271 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
272 if (fsl_chan->edesc) {
274 fsl_chan->status = DMA_IN_PROGRESS;
276 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
282 if (fsl_chan->dma_dir != DMA_NONE)
283 dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
284 fsl_chan->dma_dev_addr,
285 fsl_chan->dma_dev_size,
286 fsl_chan->dma_dir, 0);
287 fsl_chan->dma_dir = DMA_NONE;
293 struct device *dev = fsl_chan->vchan.chan.device->dev;
301 addr = fsl_chan->cfg.dst_addr;
302 size = fsl_chan->cfg.dst_maxburst;
306 addr = fsl_chan->cfg.src_addr;
307 size = fsl_chan->cfg.src_maxburst;
315 if (fsl_chan->dma_dir == dma_dir)
320 fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
321 if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
323 fsl_chan->dma_dev_size = size;
324 fsl_chan->dma_dir = dma_dir;
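
/*
 * Sketch of the map-once/reuse pattern at lines 282-324, assuming the
 * kernel's dma_map_resource()/dma_unmap_resource() API and the driver's
 * channel fields; the function name is illustrative and error handling
 * is trimmed.
 */
static int edma_map_dev_addr_sketch(struct fsl_edma_chan *fsl_chan,
				    phys_addr_t addr, size_t size,
				    enum dma_data_direction dma_dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;

	/* already mapped with the same direction: nothing to do */
	if (fsl_chan->dma_dir == dma_dir)
		return 0;

	/* drop any stale mapping first (see fsl_edma_unprep_slave_dma) */
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(dev, fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return -ENOMEM;

	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;
	return 0;
}
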
334 memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
343 struct fsl_edma_desc *edesc = fsl_chan->edesc;
344 enum dma_transfer_direction dir = edesc->dirn;
351 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
352 nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
355 len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
373 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
374 nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
378 size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
381 dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
383 dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
385 len -= size;
387 len += dma_addr + size - cur_addr;
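
/*
 * Worked residue example for the loop at lines 373-387: three TCDs of
 * 1024 bytes each, engine paused halfway through the second. Every
 * traversed TCD subtracts its size; the TCD containing cur_addr then
 * adds back its untransferred tail. Numbers are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 1024, len = 3 * 1024;
	unsigned int tcd_addr[3] = { 0x1000, 0x1400, 0x1800 };
	unsigned int cur_addr = 0x1600;		/* middle of TCD 1 */

	for (int i = 0; i < 3; i++) {
		len -= size;
		if (cur_addr >= tcd_addr[i] &&
		    cur_addr < tcd_addr[i] + size) {
			len += tcd_addr[i] + size - cur_addr;
			break;
		}
	}
	printf("residue = %u\n", len);	/* 512 + 1024 = 1536 bytes left */
	return 0;
}
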
408 return fsl_chan->status;
410 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
411 vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
412 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
413 txstate->residue =
416 txstate->residue =
419 txstate->residue = 0;
421 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
423 return fsl_chan->status;
433 * big- or little-endian, obeying the eDMA engine's endian model,
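
/*
 * Sketch of the endianness rule in the comment above: TCD registers are
 * written in the engine's native byte order, so an accessor picks the
 * BE or LE MMIO helper per device model. Hypothetical helper, not the
 * driver's actual edma_write macro set.
 */
static inline void tcd_write32_sketch(bool big_endian,
				      void __iomem *addr, u32 val)
{
	if (big_endian)
		iowrite32be(val, addr);	/* big-endian eDMA register model */
	else
		iowrite32(val, addr);	/* little-endian register model */
}
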
455 if (fsl_chan->is_sw) {
482 struct dma_slave_config *cfg = &fsl_chan->cfg;
487 * eDMA hardware SGs require the TCDs to be stored in little-endian format, irrespective of the register endian model.
500 * we will use the minor loop offset, meaning bits 29-10 will be used for the
501 * address offset, while bits 9-0 will tell the DMA how much data to move in each minor loop.
506 if (cfg->direction == DMA_MEM_TO_DEV) {
507 if (fsl_chan->is_multi_fifo)
508 burst = cfg->dst_maxburst * 4;
509 if (cfg->dst_port_window_size)
510 burst = cfg->dst_port_window_size * cfg->dst_addr_width;
512 nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
517 if (fsl_chan->is_multi_fifo)
518 burst = cfg->src_maxburst * 4;
519 if (cfg->src_port_window_size)
520 burst = cfg->src_port_window_size * cfg->src_addr_width;
522 nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
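
/*
 * Standalone illustration of the minor-loop offset words built at lines
 * 512/522: per the comment at lines 500-501, bits 29-10 carry a signed
 * address offset (negative here, to rewind the FIFO pointer after each
 * minor loop) and bits 9-0 carry the minor-loop byte count. The masks
 * are assumed to mirror the EDMA_V3_TCD_NBYTES_* macros.
 */
#include <stdint.h>
#include <stdio.h>

#define MLOFF(x)	(((uint32_t)(x) & 0xfffff) << 10)	/* bits 29-10 */
#define NBYTES(x)	((uint32_t)(x) & 0x3ff)			/* bits 9-0  */

int main(void)
{
	int burst = 16;		/* bytes moved per minor loop */

	printf("nbytes word = 0x%08x\n",
	       (unsigned int)(NBYTES(burst) | MLOFF(-burst)));
	return 0;	/* 0x3fffc010: offset -16 in 29-10, count 16 in 9-0 */
}
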
547 if (fsl_chan->is_rxchan)
550 if (fsl_chan->is_sw)
568 fsl_desc->echan = fsl_chan;
569 fsl_desc->n_tcds = sg_len;
571 fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
572 GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
573 if (!fsl_desc->tcd[i].vtcd)
574 goto err;
578 err:
579 while (--i >= 0)
580 dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
581 fsl_desc->tcd[i].ptcd);
610 fsl_desc->iscyclic = true;
611 fsl_desc->dirn = direction;
615 fsl_chan->attr =
616 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
617 nbytes = fsl_chan->cfg.dst_addr_width *
618 fsl_chan->cfg.dst_maxburst;
620 fsl_chan->attr =
621 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
622 nbytes = fsl_chan->cfg.src_addr_width *
623 fsl_chan->cfg.src_maxburst;
633 last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
637 dst_addr = fsl_chan->dma_dev_addr;
638 soff = fsl_chan->cfg.dst_addr_width;
639 doff = fsl_chan->is_multi_fifo ? 4 : 0;
640 if (fsl_chan->cfg.dst_port_window_size)
641 doff = fsl_chan->cfg.dst_addr_width;
643 src_addr = fsl_chan->dma_dev_addr;
645 soff = fsl_chan->is_multi_fifo ? 4 : 0;
646 doff = fsl_chan->cfg.src_addr_width;
647 if (fsl_chan->cfg.src_port_window_size)
648 soff = fsl_chan->cfg.src_addr_width;
651 src_addr = fsl_chan->cfg.src_addr;
652 dst_addr = fsl_chan->cfg.dst_addr;
657 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
658 fsl_chan->attr, soff, nbytes, 0, iter,
663 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
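
/*
 * Note on line 633: indexing the next TCD modulo sg_len makes the final
 * TCD's scatter/gather link point back at TCD 0, closing the chain into
 * the ring that makes this descriptor cyclic. Minimal index sketch:
 */
#include <stdio.h>

int main(void)
{
	int sg_len = 4;

	for (int i = 0; i < sg_len; i++)
		printf("tcd[%d] -> tcd[%d]\n", i, (i + 1) % sg_len);
	return 0;	/* tcd[3] -> tcd[0] closes the ring */
}
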
688 fsl_desc->iscyclic = false;
689 fsl_desc->dirn = direction;
692 fsl_chan->attr =
693 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
694 nbytes = fsl_chan->cfg.dst_addr_width *
695 fsl_chan->cfg.dst_maxburst;
697 fsl_chan->attr =
698 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
699 nbytes = fsl_chan->cfg.src_addr_width *
700 fsl_chan->cfg.src_maxburst;
706 dst_addr = fsl_chan->dma_dev_addr;
707 soff = fsl_chan->cfg.dst_addr_width;
710 src_addr = fsl_chan->dma_dev_addr;
713 doff = fsl_chan->cfg.src_addr_width;
716 src_addr = fsl_chan->cfg.src_addr;
717 dst_addr = fsl_chan->cfg.dst_addr;
730 fsl_chan->cfg.src_maxburst :
731 fsl_chan->cfg.dst_maxburst;
734 for (j = burst; j > 1; j--) {
745 if (i < sg_len - 1) {
746 last_sg = fsl_desc->tcd[(i + 1)].ptcd;
747 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
748 dst_addr, fsl_chan->attr, soff,
753 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
754 dst_addr, fsl_chan->attr, soff,
760 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
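
/*
 * Illustration of the burst-fitting loop at line 734: walk candidate
 * burst counts downward until the segment length is a whole number of
 * j * width minor loops. Standalone C with assumed example numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int seg_len = 96, width = 4, burst = 16, j;

	for (j = burst; j > 1; j--)
		if (!(seg_len % (j * width)))
			break;	/* stops at j = 12: 96 % (12 * 4) == 0 */
	printf("minor loop = %u bytes\n", j * width);	/* 48 */
	return 0;
}
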
773 fsl_desc->iscyclic = false;
775 fsl_chan->is_sw = true;
777 fsl_chan->is_remote = true;
780 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
784 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
791 lockdep_assert_held(&fsl_chan->vchan.lock);
793 vdesc = vchan_next_desc(&fsl_chan->vchan);
796 fsl_chan->edesc = to_fsl_edma_desc(vdesc);
797 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
799 fsl_chan->status = DMA_IN_PROGRESS;
807 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
809 if (unlikely(fsl_chan->pm_state != RUNNING)) {
810 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
815 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
818 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
827 clk_prepare_enable(fsl_chan->clk);
829 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
834 if (fsl_chan->txirq) {
835 ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
836 fsl_chan->chan_name, fsl_chan);
839 dma_pool_destroy(fsl_chan->tcd_pool);
850 struct fsl_edma_engine *edma = fsl_chan->edma;
854 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
856 if (edma->drvdata->dmamuxs)
858 fsl_chan->edesc = NULL;
859 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
861 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
863 if (fsl_chan->txirq)
864 free_irq(fsl_chan->txirq, fsl_chan);
866 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
867 dma_pool_destroy(fsl_chan->tcd_pool);
868 fsl_chan->tcd_pool = NULL;
869 fsl_chan->is_sw = false;
870 fsl_chan->srcid = 0;
871 fsl_chan->is_remote = false;
873 clk_disable_unprepare(fsl_chan->clk);
881 &dmadev->channels, vchan.chan.device_node) {
882 list_del(&chan->vchan.chan.device_node);
883 tasklet_kill(&chan->vchan.task);
888 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets
889 * differ from those of the 64-channel ColdFire mcf5441x eDMA.
893 * edma "version" and "membase" fields set up appropriately.
895 void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
897 bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
899 edma->regs.cr = edma->membase + EDMA_CR;
900 edma->regs.es = edma->membase + EDMA_ES;
901 edma->regs.erql = edma->membase + EDMA_ERQ;
902 edma->regs.eeil = edma->membase + EDMA_EEI;
904 edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
905 edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
906 edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
907 edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
908 edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
909 edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
910 edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
911 edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
912 edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
913 edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
916 edma->regs.erqh = edma->membase + EDMA64_ERQH;
917 edma->regs.eeih = edma->membase + EDMA64_EEIH;
918 edma->regs.errh = edma->membase + EDMA64_ERRH;
919 edma->regs.inth = edma->membase + EDMA64_INTH;
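
/*
 * Usage sketch for fsl_edma_setup_regs(): per the comment block above,
 * it must run in the probe path after membase is mapped and the drvdata
 * flags are known, and before any regs.* pointer is dereferenced. The
 * probe fragment below is hypothetical.
 */
static int edma_probe_fragment_sketch(struct fsl_edma_engine *edma)
{
	fsl_edma_setup_regs(edma);	/* populate edma->regs.* pointers */

	/* only now is the register map safe to touch */
	edma_writel(edma, 0, edma->regs.cr);
	return 0;
}
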