Lines Matching +full:dma +full:- +full:maxburst

1 // SPDX-License-Identifier: GPL-2.0-only
11 #include <linux/dma-mapping.h>
37 #define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
39 #define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
40 #define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
67 #define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
74 #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
84 * The following 32-bit words are only used in the 64-bit, ie.
89 u32 ddadrh; /* High 32-bit of DDADR */
90 u32 dsadrh; /* High 32-bit of DSADR */
91 u32 dtadrh; /* High 32-bit of DTADR */
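
The high-address words above extend the basic descriptor for 64-bit operation. Pieced together from the fields this listing touches elsewhere (ddadr, dsadr, dtadr, dcmd and their *h counterparts), the hardware descriptor plausibly has the layout sketched below; field order and the trailing padding word are assumptions, not copied from the driver.

#include <linux/types.h>

/* Hedged sketch of the MMP PDMA hardware descriptor implied by this listing. */
struct mmp_pdma_desc_hw_sketch {
	u32 ddadr;	/* next descriptor address; DDADR_STOP ends the chain */
	u32 dsadr;	/* source address, low 32 bits */
	u32 dtadr;	/* target address, low 32 bits */
	u32 dcmd;	/* DCMD_LENGTH | DCMD_WIDTHx | DCMD_BURSTx | ... */
	/* 64-bit (128-bit descriptor) mode only: */
	u32 ddadrh;	/* high 32 bits of DDADR */
	u32 dsadrh;	/* high 32 bits of DSADR */
	u32 dtadrh;	/* high 32 bits of DTADR */
	u32 rsvd;	/* assumed padding for descriptor alignment */
};
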
138 * struct mmp_pdma_ops - Operations for the MMP PDMA controller
155 * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
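
Only fragments of the struct mmp_pdma_ops kernel-doc match here. Reconstructed from its call sites later in this listing (write_next_addr, read_src_addr/read_dst_addr, the set_desc_*/get_desc_* hooks, run_bits and dma_mask), the structure plausibly looks like the sketch below; member order and exact prototypes are assumptions.

/* Hedged reconstruction of struct mmp_pdma_ops from the visible call sites. */
struct mmp_pdma_ops_sketch {
	void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
	u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
	u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
	void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc, dma_addr_t addr);
	void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc, dma_addr_t addr);
	void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc, dma_addr_t addr);
	u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
	u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
	u32 run_bits;	/* DCSR bits toggled in enable_chan()/disable_chan() */
	u64 dma_mask;	/* 0 = defer to OF/platform, else forced, e.g. DMA_BIT_MASK(64) */
};
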
198 /* For 32-bit PDMA */
201 writel(addr, phy->base + DDADR(phy->idx)); in write_next_addr_32()
206 return readl(phy->base + DSADR(phy->idx)); in read_src_addr_32()
211 return readl(phy->base + DTADR(phy->idx)); in read_dst_addr_32()
216 desc->ddadr = addr; in set_desc_next_addr_32()
221 desc->dsadr = addr; in set_desc_src_addr_32()
226 desc->dtadr = addr; in set_desc_dst_addr_32()
231 return desc->dsadr; in get_desc_src_addr_32()
236 return desc->dtadr; in get_desc_dst_addr_32()
239 /* For 64-bit PDMA */
242 writel(lower_32_bits(addr), phy->base + DDADR(phy->idx)); in write_next_addr_64()
243 writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx)); in write_next_addr_64()
248 u32 low = readl(phy->base + DSADR(phy->idx)); in read_src_addr_64()
249 u32 high = readl(phy->base + DSADRH(phy->idx)); in read_src_addr_64()
256 u32 low = readl(phy->base + DTADR(phy->idx)); in read_dst_addr_64()
257 u32 high = readl(phy->base + DTADRH(phy->idx)); in read_dst_addr_64()
264 desc->ddadr = lower_32_bits(addr); in set_desc_next_addr_64()
265 desc->ddadrh = upper_32_bits(addr); in set_desc_next_addr_64()
270 desc->dsadr = lower_32_bits(addr); in set_desc_src_addr_64()
271 desc->dsadrh = upper_32_bits(addr); in set_desc_src_addr_64()
276 desc->dtadr = lower_32_bits(addr); in set_desc_dst_addr_64()
277 desc->dtadrh = upper_32_bits(addr); in set_desc_dst_addr_64()
282 return ((u64)desc->dsadrh << 32) | desc->dsadr; in get_desc_src_addr_64()
287 return ((u64)desc->dtadrh << 32) | desc->dtadr; in get_desc_dst_addr_64()
299 if (!phy->vchan) in enable_chan()
302 pdev = to_mmp_pdma_dev(phy->vchan->chan.device); in enable_chan()
304 reg = DRCMR(phy->vchan->drcmr); in enable_chan()
305 writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); in enable_chan()
307 dalgn = readl(phy->base + DALGN); in enable_chan()
308 if (phy->vchan->byte_align) in enable_chan()
309 dalgn |= 1 << phy->idx; in enable_chan()
311 dalgn &= ~(1 << phy->idx); in enable_chan()
312 writel(dalgn, phy->base + DALGN); in enable_chan()
314 reg = (phy->idx << 2) + DCSR; in enable_chan()
315 writel(readl(phy->base + reg) | pdev->ops->run_bits, in enable_chan()
316 phy->base + reg); in enable_chan()
326 reg = (phy->idx << 2) + DCSR; in disable_chan()
327 dcsr = readl(phy->base + reg); in disable_chan()
329 if (phy->vchan) { in disable_chan()
332 pdev = to_mmp_pdma_dev(phy->vchan->chan.device); in disable_chan()
333 writel(dcsr & ~pdev->ops->run_bits, phy->base + reg); in disable_chan()
336 writel(dcsr & ~DCSR_RUN, phy->base + reg); in disable_chan()
343 u32 dint = readl(phy->base + DINT); in clear_chan_irq()
344 u32 reg = (phy->idx << 2) + DCSR; in clear_chan_irq()
346 if (!(dint & BIT(phy->idx))) in clear_chan_irq()
347 return -EAGAIN; in clear_chan_irq()
350 dcsr = readl(phy->base + reg); in clear_chan_irq()
351 writel(dcsr, phy->base + reg); in clear_chan_irq()
352 if ((dcsr & DCSR_BUSERR) && (phy->vchan)) in clear_chan_irq()
353 dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); in clear_chan_irq()
365 tasklet_schedule(&phy->vchan->tasklet); in mmp_pdma_chan_handler()
373 u32 dint = readl(pdev->base + DINT); in mmp_pdma_int_handler()
380 if (i >= pdev->dma_channels) in mmp_pdma_int_handler()
382 dint &= (dint - 1); in mmp_pdma_int_handler()
383 phy = &pdev->phy[i]; in mmp_pdma_int_handler()
399 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); in lookup_phy()
404 * dma channel priorities in lookup_phy()
405 * ch 0 - 3, 16 - 19 <--> (0) in lookup_phy()
406 * ch 4 - 7, 20 - 23 <--> (1) in lookup_phy()
407 * ch 8 - 11, 24 - 27 <--> (2) in lookup_phy()
408 * ch 12 - 15, 28 - 31 <--> (3) in lookup_phy()
411 spin_lock_irqsave(&pdev->phy_lock, flags); in lookup_phy()
412 for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { in lookup_phy()
413 for (i = 0; i < pdev->dma_channels; i++) { in lookup_phy()
416 phy = &pdev->phy[i]; in lookup_phy()
417 if (!phy->vchan) { in lookup_phy()
418 phy->vchan = pchan; in lookup_phy()
426 spin_unlock_irqrestore(&pdev->phy_lock, flags); in lookup_phy()
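
The priority scheme described in the lookup_phy() comment folds channel indices modulo 16 and groups them four at a time, which is what the loop bound ((pdev->dma_channels - 1) & 0xf) >> 2 encodes. A minimal illustrative helper (not part of the driver) expressing the same index-to-priority mapping:

/* Illustrative only: ch 0-3/16-19 -> prio 0, ch 4-7/20-23 -> prio 1, and so on. */
static unsigned int mmp_pdma_chan_prio(unsigned int idx)
{
	return (idx & 0xf) >> 2;
}
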
432 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); in mmp_pdma_free_phy()
436 if (!pchan->phy) in mmp_pdma_free_phy()
440 reg = DRCMR(pchan->drcmr); in mmp_pdma_free_phy()
441 writel(0, pchan->phy->base + reg); in mmp_pdma_free_phy()
443 spin_lock_irqsave(&pdev->phy_lock, flags); in mmp_pdma_free_phy()
444 pchan->phy->vchan = NULL; in mmp_pdma_free_phy()
445 pchan->phy = NULL; in mmp_pdma_free_phy()
446 spin_unlock_irqrestore(&pdev->phy_lock, flags); in mmp_pdma_free_phy()
450 * start_pending_queue - transfer any pending transactions
456 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device); in start_pending_queue()
459 if (!chan->idle) { in start_pending_queue()
460 dev_dbg(chan->dev, "DMA controller still busy\n"); in start_pending_queue()
464 if (list_empty(&chan->chain_pending)) { in start_pending_queue()
465 /* chance to re-fetch phy channel with higher prio */ in start_pending_queue()
467 dev_dbg(chan->dev, "no pending list\n"); in start_pending_queue()
471 if (!chan->phy) { in start_pending_queue()
472 chan->phy = lookup_phy(chan); in start_pending_queue()
473 if (!chan->phy) { in start_pending_queue()
474 dev_dbg(chan->dev, "no free dma channel\n"); in start_pending_queue()
480 * pending -> running in start_pending_queue()
483 desc = list_first_entry(&chan->chain_pending, in start_pending_queue()
485 list_splice_tail_init(&chan->chain_pending, &chan->chain_running); in start_pending_queue()
488 * Program the descriptor's address into the DMA controller, in start_pending_queue()
489 * then start the DMA transaction in start_pending_queue()
491 pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys); in start_pending_queue()
492 enable_chan(chan->phy); in start_pending_queue()
493 chan->idle = false; in start_pending_queue()
497 /* desc->tx_list ==> pending list */
500 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); in mmp_pdma_tx_submit()
504 dma_cookie_t cookie = -EBUSY; in mmp_pdma_tx_submit()
506 spin_lock_irqsave(&chan->desc_lock, flags); in mmp_pdma_tx_submit()
508 list_for_each_entry(child, &desc->tx_list, node) { in mmp_pdma_tx_submit()
509 cookie = dma_cookie_assign(&child->async_tx); in mmp_pdma_tx_submit()
512 /* softly link to pending list - desc->tx_list ==> pending list */ in mmp_pdma_tx_submit()
513 list_splice_tail_init(&desc->tx_list, &chan->chain_pending); in mmp_pdma_tx_submit()
515 spin_unlock_irqrestore(&chan->desc_lock, flags); in mmp_pdma_tx_submit()
526 desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); in mmp_pdma_alloc_descriptor()
528 dev_err(chan->dev, "out of memory for link descriptor\n"); in mmp_pdma_alloc_descriptor()
532 INIT_LIST_HEAD(&desc->tx_list); in mmp_pdma_alloc_descriptor()
533 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); in mmp_pdma_alloc_descriptor()
535 desc->async_tx.tx_submit = mmp_pdma_tx_submit; in mmp_pdma_alloc_descriptor()
536 desc->async_tx.phys = pdesc; in mmp_pdma_alloc_descriptor()
542 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
544 * This function will create a dma pool for descriptor allocation.
546 * Return - The number of allocated descriptors.
553 if (chan->desc_pool) in mmp_pdma_alloc_chan_resources()
556 chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), in mmp_pdma_alloc_chan_resources()
557 chan->dev, in mmp_pdma_alloc_chan_resources()
561 if (!chan->desc_pool) { in mmp_pdma_alloc_chan_resources()
562 dev_err(chan->dev, "unable to allocate descriptor pool\n"); in mmp_pdma_alloc_chan_resources()
563 return -ENOMEM; in mmp_pdma_alloc_chan_resources()
567 chan->idle = true; in mmp_pdma_alloc_chan_resources()
568 chan->dev_addr = 0; in mmp_pdma_alloc_chan_resources()
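
All of a channel's descriptors come from the dma_pool created here: mmp_pdma_alloc_descriptor() draws from it with dma_pool_zalloc(), mmp_pdma_free_desc_list() returns entries with dma_pool_free(), and mmp_pdma_free_chan_resources() destroys it. A minimal sketch of that lifecycle; the pool name, size and alignment below are placeholders, not the driver's values:

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Sketch of the descriptor-pool lifecycle this driver follows. */
static int pdma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t phys;
	void *desc;

	pool = dma_pool_create("pdma_desc_demo", dev, 32, 16, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_zalloc(pool, GFP_ATOMIC, &phys);	/* per-descriptor alloc */
	if (desc)
		dma_pool_free(pool, desc, phys);		/* per-descriptor free */

	dma_pool_destroy(pool);					/* channel teardown */
	return 0;
}
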
578 list_del(&desc->node); in mmp_pdma_free_desc_list()
579 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); in mmp_pdma_free_desc_list()
588 spin_lock_irqsave(&chan->desc_lock, flags); in mmp_pdma_free_chan_resources()
589 mmp_pdma_free_desc_list(chan, &chan->chain_pending); in mmp_pdma_free_chan_resources()
590 mmp_pdma_free_desc_list(chan, &chan->chain_running); in mmp_pdma_free_chan_resources()
591 spin_unlock_irqrestore(&chan->desc_lock, flags); in mmp_pdma_free_chan_resources()
593 dma_pool_destroy(chan->desc_pool); in mmp_pdma_free_chan_resources()
594 chan->desc_pool = NULL; in mmp_pdma_free_chan_resources()
595 chan->idle = true; in mmp_pdma_free_chan_resources()
596 chan->dev_addr = 0; in mmp_pdma_free_chan_resources()
614 pdev = to_mmp_pdma_dev(dchan->device); in mmp_pdma_prep_memcpy()
616 chan->byte_align = false; in mmp_pdma_prep_memcpy()
618 if (!chan->dir) { in mmp_pdma_prep_memcpy()
619 chan->dir = DMA_MEM_TO_MEM; in mmp_pdma_prep_memcpy()
620 chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; in mmp_pdma_prep_memcpy()
621 chan->dcmd |= DCMD_BURST32; in mmp_pdma_prep_memcpy()
625 /* Allocate the link descriptor from DMA pool */ in mmp_pdma_prep_memcpy()
628 dev_err(chan->dev, "no memory for desc\n"); in mmp_pdma_prep_memcpy()
634 chan->byte_align = true; in mmp_pdma_prep_memcpy()
636 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); in mmp_pdma_prep_memcpy()
637 pdev->ops->set_desc_src_addr(&new->desc, dma_src); in mmp_pdma_prep_memcpy()
638 pdev->ops->set_desc_dst_addr(&new->desc, dma_dst); in mmp_pdma_prep_memcpy()
643 pdev->ops->set_desc_next_addr(&prev->desc, in mmp_pdma_prep_memcpy()
644 new->async_tx.phys); in mmp_pdma_prep_memcpy()
646 new->async_tx.cookie = 0; in mmp_pdma_prep_memcpy()
647 async_tx_ack(&new->async_tx); in mmp_pdma_prep_memcpy()
650 len -= copy; in mmp_pdma_prep_memcpy()
652 if (chan->dir == DMA_MEM_TO_DEV) { in mmp_pdma_prep_memcpy()
654 } else if (chan->dir == DMA_DEV_TO_MEM) { in mmp_pdma_prep_memcpy()
656 } else if (chan->dir == DMA_MEM_TO_MEM) { in mmp_pdma_prep_memcpy()
662 list_add_tail(&new->node, &first->tx_list); in mmp_pdma_prep_memcpy()
665 first->async_tx.flags = flags; /* client is in control of this ack */ in mmp_pdma_prep_memcpy()
666 first->async_tx.cookie = -EBUSY; in mmp_pdma_prep_memcpy()
669 new->desc.ddadr = DDADR_STOP; in mmp_pdma_prep_memcpy()
670 new->desc.dcmd |= DCMD_ENDIRQEN; in mmp_pdma_prep_memcpy()
672 chan->cyclic_first = NULL; in mmp_pdma_prep_memcpy()
674 return &first->async_tx; in mmp_pdma_prep_memcpy()
678 mmp_pdma_free_desc_list(chan, &first->tx_list); in mmp_pdma_prep_memcpy()
688 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device); in mmp_pdma_prep_slave_sg()
698 chan->byte_align = false; in mmp_pdma_prep_slave_sg()
700 mmp_pdma_config_write(dchan, &chan->slave_config, dir); in mmp_pdma_prep_slave_sg()
709 chan->byte_align = true; in mmp_pdma_prep_slave_sg()
714 dev_err(chan->dev, "no memory for desc\n"); in mmp_pdma_prep_slave_sg()
718 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); in mmp_pdma_prep_slave_sg()
720 pdev->ops->set_desc_src_addr(&new->desc, addr); in mmp_pdma_prep_slave_sg()
721 new->desc.dtadr = chan->dev_addr; in mmp_pdma_prep_slave_sg()
723 new->desc.dsadr = chan->dev_addr; in mmp_pdma_prep_slave_sg()
724 pdev->ops->set_desc_dst_addr(&new->desc, addr); in mmp_pdma_prep_slave_sg()
730 pdev->ops->set_desc_next_addr(&prev->desc, in mmp_pdma_prep_slave_sg()
731 new->async_tx.phys); in mmp_pdma_prep_slave_sg()
733 new->async_tx.cookie = 0; in mmp_pdma_prep_slave_sg()
734 async_tx_ack(&new->async_tx); in mmp_pdma_prep_slave_sg()
738 list_add_tail(&new->node, &first->tx_list); in mmp_pdma_prep_slave_sg()
742 avail -= len; in mmp_pdma_prep_slave_sg()
746 first->async_tx.cookie = -EBUSY; in mmp_pdma_prep_slave_sg()
747 first->async_tx.flags = flags; in mmp_pdma_prep_slave_sg()
750 new->desc.ddadr = DDADR_STOP; in mmp_pdma_prep_slave_sg()
751 new->desc.dcmd |= DCMD_ENDIRQEN; in mmp_pdma_prep_slave_sg()
753 chan->dir = dir; in mmp_pdma_prep_slave_sg()
754 chan->cyclic_first = NULL; in mmp_pdma_prep_slave_sg()
756 return &first->async_tx; in mmp_pdma_prep_slave_sg()
760 mmp_pdma_free_desc_list(chan, &first->tx_list); in mmp_pdma_prep_slave_sg()
778 pdev = to_mmp_pdma_dev(dchan->device); in mmp_pdma_prep_dma_cyclic()
788 mmp_pdma_config_write(dchan, &chan->slave_config, direction); in mmp_pdma_prep_dma_cyclic()
793 dma_dst = chan->dev_addr; in mmp_pdma_prep_dma_cyclic()
797 dma_src = chan->dev_addr; in mmp_pdma_prep_dma_cyclic()
800 dev_err(chan->dev, "Unsupported direction for cyclic DMA\n"); in mmp_pdma_prep_dma_cyclic()
804 chan->dir = direction; in mmp_pdma_prep_dma_cyclic()
807 /* Allocate the link descriptor from DMA pool */ in mmp_pdma_prep_dma_cyclic()
810 dev_err(chan->dev, "no memory for desc\n"); in mmp_pdma_prep_dma_cyclic()
814 new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | in mmp_pdma_prep_dma_cyclic()
816 pdev->ops->set_desc_src_addr(&new->desc, dma_src); in mmp_pdma_prep_dma_cyclic()
817 pdev->ops->set_desc_dst_addr(&new->desc, dma_dst); in mmp_pdma_prep_dma_cyclic()
822 pdev->ops->set_desc_next_addr(&prev->desc, in mmp_pdma_prep_dma_cyclic()
823 new->async_tx.phys); in mmp_pdma_prep_dma_cyclic()
825 new->async_tx.cookie = 0; in mmp_pdma_prep_dma_cyclic()
826 async_tx_ack(&new->async_tx); in mmp_pdma_prep_dma_cyclic()
829 len -= period_len; in mmp_pdma_prep_dma_cyclic()
831 if (chan->dir == DMA_MEM_TO_DEV) in mmp_pdma_prep_dma_cyclic()
837 list_add_tail(&new->node, &first->tx_list); in mmp_pdma_prep_dma_cyclic()
840 first->async_tx.flags = flags; /* client is in control of this ack */ in mmp_pdma_prep_dma_cyclic()
841 first->async_tx.cookie = -EBUSY; in mmp_pdma_prep_dma_cyclic()
844 pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys); in mmp_pdma_prep_dma_cyclic()
845 chan->cyclic_first = first; in mmp_pdma_prep_dma_cyclic()
847 return &first->async_tx; in mmp_pdma_prep_dma_cyclic()
851 mmp_pdma_free_desc_list(chan, &first->tx_list); in mmp_pdma_prep_dma_cyclic()
860 u32 maxburst = 0, addr = 0; in mmp_pdma_config_write() local
864 return -EINVAL; in mmp_pdma_config_write()
867 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; in mmp_pdma_config_write()
868 maxburst = cfg->src_maxburst; in mmp_pdma_config_write()
869 width = cfg->src_addr_width; in mmp_pdma_config_write()
870 addr = cfg->src_addr; in mmp_pdma_config_write()
872 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; in mmp_pdma_config_write()
873 maxburst = cfg->dst_maxburst; in mmp_pdma_config_write()
874 width = cfg->dst_addr_width; in mmp_pdma_config_write()
875 addr = cfg->dst_addr; in mmp_pdma_config_write()
879 chan->dcmd |= DCMD_WIDTH1; in mmp_pdma_config_write()
881 chan->dcmd |= DCMD_WIDTH2; in mmp_pdma_config_write()
883 chan->dcmd |= DCMD_WIDTH4; in mmp_pdma_config_write()
885 if (maxburst == 8) in mmp_pdma_config_write()
886 chan->dcmd |= DCMD_BURST8; in mmp_pdma_config_write()
887 else if (maxburst == 16) in mmp_pdma_config_write()
888 chan->dcmd |= DCMD_BURST16; in mmp_pdma_config_write()
889 else if (maxburst == 32) in mmp_pdma_config_write()
890 chan->dcmd |= DCMD_BURST32; in mmp_pdma_config_write()
892 chan->dir = direction; in mmp_pdma_config_write()
893 chan->dev_addr = addr; in mmp_pdma_config_write()
903 memcpy(&chan->slave_config, cfg, sizeof(*cfg)); in mmp_pdma_config()
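
mmp_pdma_config() only caches the dma_slave_config; mmp_pdma_config_write() later turns it into DCMD bits, mapping the address width to DCMD_WIDTH1/2/4, maxburst 8/16/32 to DCMD_BURST8/16/32, and storing the peripheral address in chan->dev_addr. A hedged consumer-side example (fifo_phys and the chosen values are placeholders): a 4-byte-wide device register with 32-byte bursts ends up as DCMD_WIDTH4 | DCMD_BURST32.

#include <linux/dmaengine.h>

/* Consumer-side sketch; error handling and names are illustrative. */
static int demo_slave_config(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,			/* -> chan->dev_addr */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* -> DCMD_WIDTH4 */
		.dst_maxburst	= 32,				/* -> DCMD_BURST32 */
	};

	/* Cached by mmp_pdma_config(); applied in mmp_pdma_config_write(). */
	return dmaengine_slave_config(chan, &cfg);
}
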
913 return -EINVAL; in mmp_pdma_terminate_all()
915 disable_chan(chan->phy); in mmp_pdma_terminate_all()
917 spin_lock_irqsave(&chan->desc_lock, flags); in mmp_pdma_terminate_all()
918 mmp_pdma_free_desc_list(chan, &chan->chain_pending); in mmp_pdma_terminate_all()
919 mmp_pdma_free_desc_list(chan, &chan->chain_running); in mmp_pdma_terminate_all()
920 spin_unlock_irqrestore(&chan->desc_lock, flags); in mmp_pdma_terminate_all()
921 chan->idle = true; in mmp_pdma_terminate_all()
930 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device); in mmp_pdma_residue()
934 bool cyclic = chan->cyclic_first != NULL; in mmp_pdma_residue()
940 if (!chan->phy) in mmp_pdma_residue()
943 if (chan->dir == DMA_DEV_TO_MEM) in mmp_pdma_residue()
944 curr = pdev->ops->read_dst_addr(chan->phy); in mmp_pdma_residue()
946 curr = pdev->ops->read_src_addr(chan->phy); in mmp_pdma_residue()
948 list_for_each_entry(sw, &chan->chain_running, node) { in mmp_pdma_residue()
952 if (chan->dir == DMA_DEV_TO_MEM) in mmp_pdma_residue()
953 start = pdev->ops->get_desc_dst_addr(&sw->desc); in mmp_pdma_residue()
955 start = pdev->ops->get_desc_src_addr(&sw->desc); in mmp_pdma_residue()
957 len = sw->desc.dcmd & DCMD_LENGTH; in mmp_pdma_residue()
971 residue += (u32)(end - curr); in mmp_pdma_residue()
988 if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) in mmp_pdma_residue()
991 if (sw->async_tx.cookie == cookie) { in mmp_pdma_residue()
1018 * mmp_pdma_issue_pending - Issue the DMA start command
1026 spin_lock_irqsave(&chan->desc_lock, flags); in mmp_pdma_issue_pending()
1028 spin_unlock_irqrestore(&chan->desc_lock, flags); in mmp_pdma_issue_pending()
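
issue_pending is where queued descriptors are actually kicked off: start_pending_queue() grabs a physical channel, programs the first descriptor address and sets the run bits. The usual dmaengine consumer sequence against this driver is request, configure, prepare, submit, issue; the residue reported through dmaengine_tx_status() is computed by mmp_pdma_residue() at descriptor granularity. A hedged sketch with placeholder names and trimmed error handling:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_sg_transfer(struct device *dev, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	struct dma_tx_state state;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "tx");		/* "tx" is a placeholder name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	cookie = dmaengine_submit(tx);			/* -> mmp_pdma_tx_submit() */
	dma_async_issue_pending(chan);			/* -> mmp_pdma_issue_pending() */

	dmaengine_tx_status(chan, cookie, &state);	/* residue via mmp_pdma_residue() */

	dma_release_channel(chan);
	return 0;
}
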
1044 if (chan->cyclic_first) { in dma_do_tasklet()
1045 spin_lock_irqsave(&chan->desc_lock, flags); in dma_do_tasklet()
1046 desc = chan->cyclic_first; in dma_do_tasklet()
1047 dmaengine_desc_get_callback(&desc->async_tx, &cb); in dma_do_tasklet()
1048 spin_unlock_irqrestore(&chan->desc_lock, flags); in dma_do_tasklet()
1056 spin_lock_irqsave(&chan->desc_lock, flags); in dma_do_tasklet()
1058 list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) { in dma_do_tasklet()
1063 list_move(&desc->node, &chain_cleanup); in dma_do_tasklet()
1070 if (desc->desc.dcmd & DCMD_ENDIRQEN) { in dma_do_tasklet()
1071 dma_cookie_t cookie = desc->async_tx.cookie; in dma_do_tasklet()
1072 dma_cookie_complete(&desc->async_tx); in dma_do_tasklet()
1073 dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); in dma_do_tasklet()
1082 chan->idle = list_empty(&chan->chain_running); in dma_do_tasklet()
1086 spin_unlock_irqrestore(&chan->desc_lock, flags); in dma_do_tasklet()
1090 struct dma_async_tx_descriptor *txd = &desc->async_tx; in dma_do_tasklet()
1093 list_del(&desc->node); in dma_do_tasklet()
1098 dma_pool_free(chan->desc_pool, desc, txd->phys); in dma_do_tasklet()
1108 if (op->dev.of_node) in mmp_pdma_remove()
1109 of_dma_controller_free(op->dev.of_node); in mmp_pdma_remove()
1111 for (i = 0; i < pdev->dma_channels; i++) { in mmp_pdma_remove()
1116 if (irq_num != pdev->dma_channels) { in mmp_pdma_remove()
1118 devm_free_irq(&op->dev, irq, pdev); in mmp_pdma_remove()
1120 for (i = 0; i < pdev->dma_channels; i++) { in mmp_pdma_remove()
1121 phy = &pdev->phy[i]; in mmp_pdma_remove()
1123 devm_free_irq(&op->dev, irq, phy); in mmp_pdma_remove()
1127 dma_async_device_unregister(&pdev->device); in mmp_pdma_remove()
1132 struct mmp_pdma_phy *phy = &pdev->phy[idx]; in mmp_pdma_chan_init()
1136 chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); in mmp_pdma_chan_init()
1138 return -ENOMEM; in mmp_pdma_chan_init()
1140 phy->idx = idx; in mmp_pdma_chan_init()
1141 phy->base = pdev->base; in mmp_pdma_chan_init()
1144 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, in mmp_pdma_chan_init()
1147 dev_err(pdev->dev, "channel request irq fail!\n"); in mmp_pdma_chan_init()
1152 spin_lock_init(&chan->desc_lock); in mmp_pdma_chan_init()
1153 chan->dev = pdev->dev; in mmp_pdma_chan_init()
1154 chan->chan.device = &pdev->device; in mmp_pdma_chan_init()
1155 tasklet_setup(&chan->tasklet, dma_do_tasklet); in mmp_pdma_chan_init()
1156 INIT_LIST_HEAD(&chan->chain_pending); in mmp_pdma_chan_init()
1157 INIT_LIST_HEAD(&chan->chain_running); in mmp_pdma_chan_init()
1159 /* register virt channel to dma engine */ in mmp_pdma_chan_init()
1160 list_add_tail(&chan->chan.device_node, &pdev->device.channels); in mmp_pdma_chan_init()
1175 .dma_mask = 0, /* let OF/platform set DMA mask */
1188 .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
1193 .compatible = "marvell,pdma-1.0",
1196 .compatible = "spacemit,k1-pdma",
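
Each compatible string is presumably paired with one of the two ops tables above through the of_device_id .data pointer, which probe() retrieves via of_device_get_match_data(). A sketch of that pairing; the instance names below are guesses, not the driver's identifiers:

static const struct of_device_id mmp_pdma_dt_ids_sketch[] = {
	{ .compatible = "marvell,pdma-1.0", .data = &marvell_pdma_v1_ops },
	{ .compatible = "spacemit,k1-pdma", .data = &spacemit_k1_pdma_ops },
	{ /* sentinel */ }
};
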
1207 struct mmp_pdma_device *d = ofdma->of_dma_data; in mmp_pdma_dma_xlate()
1210 chan = dma_get_any_slave_channel(&d->device); in mmp_pdma_dma_xlate()
1214 to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; in mmp_pdma_dma_xlate()
1222 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); in mmp_pdma_probe()
1231 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); in mmp_pdma_probe()
1233 return -ENOMEM; in mmp_pdma_probe()
1235 pdev->dev = &op->dev; in mmp_pdma_probe()
1237 spin_lock_init(&pdev->phy_lock); in mmp_pdma_probe()
1239 pdev->base = devm_platform_ioremap_resource(op, 0); in mmp_pdma_probe()
1240 if (IS_ERR(pdev->base)) in mmp_pdma_probe()
1241 return PTR_ERR(pdev->base); in mmp_pdma_probe()
1243 clk = devm_clk_get_optional_enabled(pdev->dev, NULL); in mmp_pdma_probe()
1247 rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev, in mmp_pdma_probe()
1252 pdev->ops = of_device_get_match_data(&op->dev); in mmp_pdma_probe()
1253 if (!pdev->ops) in mmp_pdma_probe()
1254 return -ENODEV; in mmp_pdma_probe()
1256 if (pdev->dev->of_node) { in mmp_pdma_probe()
1257 /* Parse new and deprecated dma-channels properties */ in mmp_pdma_probe()
1258 if (of_property_read_u32(pdev->dev->of_node, "dma-channels", in mmp_pdma_probe()
1260 of_property_read_u32(pdev->dev->of_node, "#dma-channels", in mmp_pdma_probe()
1262 } else if (pdata && pdata->dma_channels) { in mmp_pdma_probe()
1263 dma_channels = pdata->dma_channels; in mmp_pdma_probe()
1267 pdev->dma_channels = dma_channels; in mmp_pdma_probe()
1274 pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), in mmp_pdma_probe()
1276 if (pdev->phy == NULL) in mmp_pdma_probe()
1277 return -ENOMEM; in mmp_pdma_probe()
1279 INIT_LIST_HEAD(&pdev->device.channels); in mmp_pdma_probe()
1284 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, in mmp_pdma_probe()
1297 dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); in mmp_pdma_probe()
1298 dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); in mmp_pdma_probe()
1299 dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask); in mmp_pdma_probe()
1300 dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask); in mmp_pdma_probe()
1301 pdev->device.dev = &op->dev; in mmp_pdma_probe()
1302 pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; in mmp_pdma_probe()
1303 pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; in mmp_pdma_probe()
1304 pdev->device.device_tx_status = mmp_pdma_tx_status; in mmp_pdma_probe()
1305 pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; in mmp_pdma_probe()
1306 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; in mmp_pdma_probe()
1307 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; in mmp_pdma_probe()
1308 pdev->device.device_issue_pending = mmp_pdma_issue_pending; in mmp_pdma_probe()
1309 pdev->device.device_config = mmp_pdma_config; in mmp_pdma_probe()
1310 pdev->device.device_terminate_all = mmp_pdma_terminate_all; in mmp_pdma_probe()
1311 pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; in mmp_pdma_probe()
1312 pdev->device.src_addr_widths = widths; in mmp_pdma_probe()
1313 pdev->device.dst_addr_widths = widths; in mmp_pdma_probe()
1314 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); in mmp_pdma_probe()
1315 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; in mmp_pdma_probe()
1317 /* Set DMA mask based on ops->dma_mask, or OF/platform */ in mmp_pdma_probe()
1318 if (pdev->ops->dma_mask) in mmp_pdma_probe()
1319 dma_set_mask(pdev->dev, pdev->ops->dma_mask); in mmp_pdma_probe()
1320 else if (pdev->dev->coherent_dma_mask) in mmp_pdma_probe()
1321 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); in mmp_pdma_probe()
1323 dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); in mmp_pdma_probe()
1325 ret = dma_async_device_register(&pdev->device); in mmp_pdma_probe()
1327 dev_err(pdev->device.dev, "unable to register\n"); in mmp_pdma_probe()
1331 if (op->dev.of_node) { in mmp_pdma_probe()
1332 /* Device-tree DMA controller registration */ in mmp_pdma_probe()
1333 ret = of_dma_controller_register(op->dev.of_node, in mmp_pdma_probe()
1336 dev_err(&op->dev, "of_dma_controller_register failed\n"); in mmp_pdma_probe()
1337 dma_async_device_unregister(&pdev->device); in mmp_pdma_probe()
1343 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels); in mmp_pdma_probe()
1348 { "mmp-pdma", },
1354 .name = "mmp-pdma",
1364 MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");