Lines matching "ep9301-dma-m2p"

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for the Cirrus Logic EP93xx DMA Controller
7 * DMA M2P implementation is based on the original
8 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
14 * This driver is based on dw_dmac and amba-pl08x drivers.
20 #include <linux/dma-mapping.h>
31 /* M2P registers */
109 * M2P channels.
139 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
164 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
184 * As the EP93xx DMA controller doesn't support real chained DMA descriptors, we
186 * flattened DMA descriptor chain.
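
The struct bodies themselves are elided by this search output; judging from the fields dereferenced later in the excerpt (src_addr, dst_addr, size, complete, txd, tx_list, node), the transaction descriptor looks roughly like the sketch below. Treat it as a reconstruction from the visible field accesses, not the verbatim definition.

struct ep93xx_dma_desc {
	u32				src_addr;	/* source bus address of this chunk */
	u32				dst_addr;	/* destination bus address of this chunk */
	size_t				size;		/* number of bytes in this chunk */
	bool				complete;	/* set from the interrupt path */
	struct dma_async_tx_descriptor	txd;		/* dmaengine cookie/callback holder */
	struct list_head		tx_list;	/* chunks making up one transaction */
	struct list_head		node;		/* link in active/queue/free_list */
};
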
217 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
219 * @m2m: is this an M2M or M2P device
221 * @hw_synchronize: synchronizes DMA channel termination to current context
228 * There is one instance of this struct for the M2P channels and one for the
230 * different on M2M and M2P channels. These methods are called with channel
256 return &edmac->chan.dev->device; in chan2dev()
266 if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p")) in ep93xx_dma_chan_is_m2p()
269 return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); in ep93xx_dma_chan_is_m2p()
273 * ep93xx_dma_chan_direction - returns the direction the channel can be used in
276 * channel supports the given DMA direction. Only M2P channels have such
286 return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; in ep93xx_dma_chan_direction()
290 * ep93xx_dma_set_active - set new active descriptor chain
298 * Called with @edmac->lock held and interrupts disabled.
303 BUG_ON(!list_empty(&edmac->active)); in ep93xx_dma_set_active()
305 list_add_tail(&desc->node, &edmac->active); in ep93xx_dma_set_active()
307 /* Flatten the @desc->tx_list chain into @edmac->active list */ in ep93xx_dma_set_active()
308 while (!list_empty(&desc->tx_list)) { in ep93xx_dma_set_active()
309 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, in ep93xx_dma_set_active()
318 d->txd.callback = desc->txd.callback; in ep93xx_dma_set_active()
319 d->txd.callback_param = desc->txd.callback_param; in ep93xx_dma_set_active()
321 list_move_tail(&d->node, &edmac->active); in ep93xx_dma_set_active()
325 /* Called with @edmac->lock held and interrupts disabled */
329 return list_first_entry_or_null(&edmac->active, in ep93xx_dma_get_active()
334 * ep93xx_dma_advance_active - advances to the next active descriptor
337 * Function advances the active descriptor to the next one in @edmac->active and
343 * Called with @edmac->lock held and interrupts disabled.
349 list_rotate_left(&edmac->active); in ep93xx_dma_advance_active()
351 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_advance_active()
362 return !desc->txd.cookie; in ep93xx_dma_advance_active()
366 * M2P DMA implementation
371 writel(control, edmac->regs + M2P_CONTROL); in m2p_set_control()
376 readl(edmac->regs + M2P_CONTROL); in m2p_set_control()
383 writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC); in m2p_hw_setup()
389 edmac->buffer = 0; in m2p_hw_setup()
396 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; in m2p_channel_state()
404 spin_lock_irqsave(&edmac->lock, flags); in m2p_hw_synchronize()
405 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_synchronize()
408 spin_unlock_irqrestore(&edmac->lock, flags); in m2p_hw_synchronize()
419 dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n"); in m2p_hw_shutdown()
429 dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n"); in m2p_fill_desc()
433 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) in m2p_fill_desc()
434 bus_addr = desc->src_addr; in m2p_fill_desc()
436 bus_addr = desc->dst_addr; in m2p_fill_desc()
438 if (edmac->buffer == 0) { in m2p_fill_desc()
439 writel(desc->size, edmac->regs + M2P_MAXCNT0); in m2p_fill_desc()
440 writel(bus_addr, edmac->regs + M2P_BASE0); in m2p_fill_desc()
442 writel(desc->size, edmac->regs + M2P_MAXCNT1); in m2p_fill_desc()
443 writel(bus_addr, edmac->regs + M2P_BASE1); in m2p_fill_desc()
446 edmac->buffer ^= 1; in m2p_fill_desc()
451 u32 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_submit()
466 u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); in m2p_hw_interrupt()
473 writel(1, edmac->regs + M2P_INTERRUPT); in m2p_hw_interrupt()
484 "DMA transfer failed! Details:\n" in m2p_hw_interrupt()
489 desc->txd.cookie, desc->src_addr, desc->dst_addr, in m2p_hw_interrupt()
490 desc->size); in m2p_hw_interrupt()
507 control = readl(edmac->regs + M2P_CONTROL); in m2p_hw_interrupt()
515 * M2M DMA implementation
522 if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { in m2m_hw_setup()
524 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_setup()
528 switch (edmac->dma_cfg.port) { in m2m_hw_setup()
531 * This was found via experimenting - anything less than 5 in m2m_hw_setup()
538 if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) { in m2m_hw_setup()
554 if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) { in m2m_hw_setup()
571 return -EINVAL; in m2m_hw_setup()
574 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_setup()
581 writel(0, edmac->regs + M2M_CONTROL); in m2m_hw_shutdown()
594 if (edmac->buffer == 0) { in m2m_fill_desc()
595 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); in m2m_fill_desc()
596 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); in m2m_fill_desc()
597 writel(desc->size, edmac->regs + M2M_BCR0); in m2m_fill_desc()
599 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); in m2m_fill_desc()
600 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); in m2m_fill_desc()
601 writel(desc->size, edmac->regs + M2M_BCR1); in m2m_fill_desc()
604 edmac->buffer ^= 1; in m2m_fill_desc()
609 u32 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_submit()
617 control |= edmac->runtime_ctrl; in m2m_hw_submit()
632 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_submit()
634 if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { in m2m_hw_submit()
640 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_submit()
646 * M2M DMA controller transactions complete normally. This is not always the
647 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
649 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
651 * currently running DMA transfer. To avoid this, we use Buffer FSM and
652 * Control FSM to check current state of DMA channel.
656 u32 status = readl(edmac->regs + M2M_STATUS); in m2m_hw_interrupt()
665 if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK)) in m2m_hw_interrupt()
670 writel(0, edmac->regs + M2M_INTERRUPT); in m2m_hw_interrupt()
675 * with DMA channel state, determines action to take in interrupt. in m2m_hw_interrupt()
678 last_done = !desc || desc->txd.cookie; in m2m_hw_interrupt()
681 * Use M2M DMA Buffer FSM and Control FSM to check current state of in m2m_hw_interrupt()
682 * DMA channel. Using DONE and NFB bits from channel status register in m2m_hw_interrupt()
696 if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) { in m2m_hw_interrupt()
698 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
700 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
716 control = readl(edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
719 writel(control, edmac->regs + M2M_CONTROL); in m2m_hw_interrupt()
730 * DMA engine API implementation
740 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_desc_get()
741 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { in ep93xx_dma_desc_get()
742 if (async_tx_test_ack(&desc->txd)) { in ep93xx_dma_desc_get()
743 list_del_init(&desc->node); in ep93xx_dma_desc_get()
745 /* Re-initialize the descriptor */ in ep93xx_dma_desc_get()
746 desc->src_addr = 0; in ep93xx_dma_desc_get()
747 desc->dst_addr = 0; in ep93xx_dma_desc_get()
748 desc->size = 0; in ep93xx_dma_desc_get()
749 desc->complete = false; in ep93xx_dma_desc_get()
750 desc->txd.cookie = 0; in ep93xx_dma_desc_get()
751 desc->txd.callback = NULL; in ep93xx_dma_desc_get()
752 desc->txd.callback_param = NULL; in ep93xx_dma_desc_get()
758 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_desc_get()
768 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_desc_put()
769 list_splice_init(&desc->tx_list, &edmac->free_list); in ep93xx_dma_desc_put()
770 list_add(&desc->node, &edmac->free_list); in ep93xx_dma_desc_put()
771 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_desc_put()
776 * ep93xx_dma_advance_work - start processing the next pending transaction
780 * function takes the next queued transaction from the @edmac->queue and
788 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_advance_work()
789 if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { in ep93xx_dma_advance_work()
790 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_advance_work()
795 new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); in ep93xx_dma_advance_work()
796 list_del_init(&new->node); in ep93xx_dma_advance_work()
801 edmac->edma->hw_submit(edmac); in ep93xx_dma_advance_work()
802 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_advance_work()
813 spin_lock_irq(&edmac->lock); in ep93xx_dma_tasklet()
821 if (desc->complete) { in ep93xx_dma_tasklet()
823 if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_tasklet()
824 dma_cookie_complete(&desc->txd); in ep93xx_dma_tasklet()
825 list_splice_init(&edmac->active, &list); in ep93xx_dma_tasklet()
827 dmaengine_desc_get_callback(&desc->txd, &cb); in ep93xx_dma_tasklet()
829 spin_unlock_irq(&edmac->lock); in ep93xx_dma_tasklet()
836 dma_descriptor_unmap(&desc->txd); in ep93xx_dma_tasklet()
849 spin_lock(&edmac->lock); in ep93xx_dma_interrupt()
855 spin_unlock(&edmac->lock); in ep93xx_dma_interrupt()
859 switch (edmac->edma->hw_interrupt(edmac)) { in ep93xx_dma_interrupt()
861 desc->complete = true; in ep93xx_dma_interrupt()
862 tasklet_schedule(&edmac->tasklet); in ep93xx_dma_interrupt()
866 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) in ep93xx_dma_interrupt()
867 tasklet_schedule(&edmac->tasklet); in ep93xx_dma_interrupt()
876 spin_unlock(&edmac->lock); in ep93xx_dma_interrupt()
881 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
890 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); in ep93xx_dma_tx_submit()
895 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_tx_submit()
905 if (list_empty(&edmac->active)) { in ep93xx_dma_tx_submit()
907 edmac->edma->hw_submit(edmac); in ep93xx_dma_tx_submit()
909 list_add_tail(&desc->node, &edmac->queue); in ep93xx_dma_tx_submit()
912 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_tx_submit()
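
From a client driver's side, the submit path above is reached through the generic dmaengine helpers. A minimal sketch, assuming the channel and a prepared descriptor already exist; my_submit_and_start() and the callback are illustrative names, not part of this driver:

#include <linux/dmaengine.h>

static int my_submit_and_start(struct dma_chan *chan,
			       struct dma_async_tx_descriptor *txd,
			       dma_async_tx_callback done_cb, void *ctx)
{
	dma_cookie_t cookie;

	txd->callback = done_cb;		/* completion runs from the driver's tasklet */
	txd->callback_param = ctx;

	cookie = dmaengine_submit(txd);		/* ends up in ep93xx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);		/* kicks ep93xx_dma_issue_pending() */
	return 0;
}
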
917 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
920 * Function allocates necessary resources for the given DMA channel and
931 if (!edmac->edma->m2m) { in ep93xx_dma_alloc_chan_resources()
932 if (edmac->dma_cfg.port > EP93XX_DMA_IRDA) in ep93xx_dma_alloc_chan_resources()
933 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
934 if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan)) in ep93xx_dma_alloc_chan_resources()
935 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
937 if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) { in ep93xx_dma_alloc_chan_resources()
938 switch (edmac->dma_cfg.port) { in ep93xx_dma_alloc_chan_resources()
941 if (!is_slave_direction(edmac->dma_cfg.dir)) in ep93xx_dma_alloc_chan_resources()
942 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
945 return -EINVAL; in ep93xx_dma_alloc_chan_resources()
950 ret = clk_prepare_enable(edmac->clk); in ep93xx_dma_alloc_chan_resources()
954 ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); in ep93xx_dma_alloc_chan_resources()
958 spin_lock_irq(&edmac->lock); in ep93xx_dma_alloc_chan_resources()
959 dma_cookie_init(&edmac->chan); in ep93xx_dma_alloc_chan_resources()
960 ret = edmac->edma->hw_setup(edmac); in ep93xx_dma_alloc_chan_resources()
961 spin_unlock_irq(&edmac->lock); in ep93xx_dma_alloc_chan_resources()
975 INIT_LIST_HEAD(&desc->tx_list); in ep93xx_dma_alloc_chan_resources()
977 dma_async_tx_descriptor_init(&desc->txd, chan); in ep93xx_dma_alloc_chan_resources()
978 desc->txd.flags = DMA_CTRL_ACK; in ep93xx_dma_alloc_chan_resources()
979 desc->txd.tx_submit = ep93xx_dma_tx_submit; in ep93xx_dma_alloc_chan_resources()
987 free_irq(edmac->irq, edmac); in ep93xx_dma_alloc_chan_resources()
989 clk_disable_unprepare(edmac->clk); in ep93xx_dma_alloc_chan_resources()
995 * ep93xx_dma_free_chan_resources - release resources for the channel
1008 BUG_ON(!list_empty(&edmac->active)); in ep93xx_dma_free_chan_resources()
1009 BUG_ON(!list_empty(&edmac->queue)); in ep93xx_dma_free_chan_resources()
1011 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_free_chan_resources()
1012 edmac->edma->hw_shutdown(edmac); in ep93xx_dma_free_chan_resources()
1013 edmac->runtime_addr = 0; in ep93xx_dma_free_chan_resources()
1014 edmac->runtime_ctrl = 0; in ep93xx_dma_free_chan_resources()
1015 edmac->buffer = 0; in ep93xx_dma_free_chan_resources()
1016 list_splice_init(&edmac->free_list, &list); in ep93xx_dma_free_chan_resources()
1017 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_free_chan_resources()
1022 clk_disable_unprepare(edmac->clk); in ep93xx_dma_free_chan_resources()
1023 free_irq(edmac->irq, edmac); in ep93xx_dma_free_chan_resources()
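
The matching client-side teardown is simply dropping the channel once nothing is in flight; the dmaengine core then invokes the free_chan_resources hook above. Sketch, with the wrapper name being illustrative:

#include <linux/dmaengine.h>

static void my_put_channel(struct dma_chan *chan)
{
	/* The core calls device_free_chan_resources(), i.e. the function above. */
	dma_release_channel(chan);
}
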
1027 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
1034 * Returns a valid DMA descriptor or %NULL in case of failure.
1052 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); in ep93xx_dma_prep_dma_memcpy()
1054 desc->src_addr = src + offset; in ep93xx_dma_prep_dma_memcpy()
1055 desc->dst_addr = dest + offset; in ep93xx_dma_prep_dma_memcpy()
1056 desc->size = bytes; in ep93xx_dma_prep_dma_memcpy()
1061 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_dma_memcpy()
1064 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_dma_memcpy()
1065 first->txd.flags = flags; in ep93xx_dma_prep_dma_memcpy()
1067 return &first->txd; in ep93xx_dma_prep_dma_memcpy()
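
As the chunking above shows, the driver itself splits a copy into DMA_MAX_CHAN_BYTES-sized pieces, so a client can hand over the whole length in one call. A minimal usage sketch; my_dma_memcpy() is an illustrative name and both addresses must already be DMA-mapped:

#include <linux/dmaengine.h>

static int my_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
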
1074 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1078 * @dir: direction of the DMA transfer
1082 * Returns a valid DMA descriptor or %NULL in case of failure.
1094 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { in ep93xx_dma_prep_slave_sg()
1100 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { in ep93xx_dma_prep_slave_sg()
1106 ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config); in ep93xx_dma_prep_slave_sg()
1125 desc->src_addr = sg_dma_address(sg); in ep93xx_dma_prep_slave_sg()
1126 desc->dst_addr = edmac->runtime_addr; in ep93xx_dma_prep_slave_sg()
1128 desc->src_addr = edmac->runtime_addr; in ep93xx_dma_prep_slave_sg()
1129 desc->dst_addr = sg_dma_address(sg); in ep93xx_dma_prep_slave_sg()
1131 desc->size = len; in ep93xx_dma_prep_slave_sg()
1136 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_slave_sg()
1139 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_slave_sg()
1140 first->txd.flags = flags; in ep93xx_dma_prep_slave_sg()
1142 return &first->txd; in ep93xx_dma_prep_slave_sg()
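
Client drivers reach this through dmaengine_prep_slave_sg() after configuring the channel (see ep93xx_dma_slave_config() further below); per the check above, for M2P channels the direction has to match the one the channel was requested for. Sketch with an illustrative helper name:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
my_prep_rx(struct dma_chan *chan, struct scatterlist *sgl, unsigned int nents)
{
	/* DMA_DEV_TO_MEM: the peripheral fills the scatterlist. */
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
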
1150 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1152 * @dma_addr: DMA mapped address of the buffer
1158 * Prepares a descriptor for cyclic DMA operation. This means that once the
1164 * Returns a valid DMA descriptor or %NULL in case of failure.
1175 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { in ep93xx_dma_prep_dma_cyclic()
1181 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { in ep93xx_dma_prep_dma_cyclic()
1193 ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config); in ep93xx_dma_prep_dma_cyclic()
1205 desc->src_addr = dma_addr + offset; in ep93xx_dma_prep_dma_cyclic()
1206 desc->dst_addr = edmac->runtime_addr; in ep93xx_dma_prep_dma_cyclic()
1208 desc->src_addr = edmac->runtime_addr; in ep93xx_dma_prep_dma_cyclic()
1209 desc->dst_addr = dma_addr + offset; in ep93xx_dma_prep_dma_cyclic()
1212 desc->size = period_len; in ep93xx_dma_prep_dma_cyclic()
1217 list_add_tail(&desc->node, &first->tx_list); in ep93xx_dma_prep_dma_cyclic()
1220 first->txd.cookie = -EBUSY; in ep93xx_dma_prep_dma_cyclic()
1222 return &first->txd; in ep93xx_dma_prep_dma_cyclic()
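
A typical client use is an audio-style ring buffer: the descriptor's callback fires once per completed period, and only one cyclic transaction can be active per channel (the IS_CYCLIC test above). Sketch with illustrative names, assuming buf_len is a multiple of period_len:

#include <linux/dmaengine.h>

static int my_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len,
			      dma_async_tx_callback period_cb, void *ctx)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = period_cb;	/* invoked for each completed period */
	txd->callback_param = ctx;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
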
1230 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1234 * Synchronizes the DMA channel termination to the current context. When this
1245 if (edmac->edma->hw_synchronize) in ep93xx_dma_synchronize()
1246 edmac->edma->hw_synchronize(edmac); in ep93xx_dma_synchronize()
1250 * ep93xx_dma_terminate_all - terminate all transactions
1253 * Stops all DMA transactions. All descriptors are put back to the
1254 * @edmac->free_list and callbacks are _not_ called.
1263 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_terminate_all()
1264 /* First we disable and flush the DMA channel */ in ep93xx_dma_terminate_all()
1265 edmac->edma->hw_shutdown(edmac); in ep93xx_dma_terminate_all()
1266 clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); in ep93xx_dma_terminate_all()
1267 list_splice_init(&edmac->active, &list); in ep93xx_dma_terminate_all()
1268 list_splice_init(&edmac->queue, &list); in ep93xx_dma_terminate_all()
1270 * We then re-enable the channel. This way we can continue submitting in ep93xx_dma_terminate_all()
1271 * the descriptors by just calling ->hw_submit() again. in ep93xx_dma_terminate_all()
1273 edmac->edma->hw_setup(edmac); in ep93xx_dma_terminate_all()
1274 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_terminate_all()
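
Because terminate_all drops descriptors without running callbacks, a client that must be sure the hardware and the tasklet are quiescent pairs it with the synchronize hook; dmaengine_terminate_sync() does both. Sketch with an illustrative wrapper name:

#include <linux/dmaengine.h>

static int my_stop_channel(struct dma_chan *chan)
{
	/*
	 * Calls device_terminate_all() and then device_synchronize(), i.e.
	 * ep93xx_dma_terminate_all() followed by ep93xx_dma_synchronize()
	 * for this driver.
	 */
	return dmaengine_terminate_sync(chan);
}
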
1287 memcpy(&edmac->slave_config, config, sizeof(*config)); in ep93xx_dma_slave_config()
1301 if (!edmac->edma->m2m) in ep93xx_dma_slave_config_write()
1302 return -EINVAL; in ep93xx_dma_slave_config_write()
1306 width = config->src_addr_width; in ep93xx_dma_slave_config_write()
1307 addr = config->src_addr; in ep93xx_dma_slave_config_write()
1311 width = config->dst_addr_width; in ep93xx_dma_slave_config_write()
1312 addr = config->dst_addr; in ep93xx_dma_slave_config_write()
1316 return -EINVAL; in ep93xx_dma_slave_config_write()
1330 return -EINVAL; in ep93xx_dma_slave_config_write()
1333 spin_lock_irqsave(&edmac->lock, flags); in ep93xx_dma_slave_config_write()
1334 edmac->runtime_addr = addr; in ep93xx_dma_slave_config_write()
1335 edmac->runtime_ctrl = ctrl; in ep93xx_dma_slave_config_write()
1336 spin_unlock_irqrestore(&edmac->lock, flags); in ep93xx_dma_slave_config_write()
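
Per the check at the top of this function, the runtime address/width programming applies to M2M-backed channels; M2P channels select their peripheral through the port number instead (the M2P_PPALLOC write in m2p_hw_setup()). A client-side configuration sketch, with the FIFO address and bus width as placeholder values:

#include <linux/dmaengine.h>

static int my_config_tx(struct dma_chan *chan, dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.dst_addr	= dev_fifo,			/* peripheral data register */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* placeholder width */
	};

	/* Stored by ep93xx_dma_slave_config(), applied at prep time. */
	return dmaengine_slave_config(chan, &cfg);
}
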
1342 * ep93xx_dma_tx_status - check if a transaction is completed
1357 * ep93xx_dma_issue_pending - push pending transactions to the hardware
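
A client can poll the cookie returned by dmaengine_submit() through the tx_status hook and returns DMA_COMPLETE once the transaction has finished. Sketch with an illustrative helper name:

#include <linux/dmaengine.h>

static bool my_transfer_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	/* No residue reporting requested, so the dma_tx_state argument is NULL. */
	return dmaengine_tx_status(chan, cookie, NULL) == DMA_COMPLETE;
}
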
1371 struct device *dev = &pdev->dev; in ep93xx_dma_of_probe()
1379 return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n")); in ep93xx_dma_of_probe()
1381 edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels), in ep93xx_dma_of_probe()
1384 return ERR_PTR(-ENOMEM); in ep93xx_dma_of_probe()
1386 edma->m2m = data->id; in ep93xx_dma_of_probe()
1387 edma->num_channels = data->num_channels; in ep93xx_dma_of_probe()
1388 dma_dev = &edma->dma_dev; in ep93xx_dma_of_probe()
1390 INIT_LIST_HEAD(&dma_dev->channels); in ep93xx_dma_of_probe()
1391 for (i = 0; i < edma->num_channels; i++) { in ep93xx_dma_of_probe()
1392 struct ep93xx_dma_chan *edmac = &edma->channels[i]; in ep93xx_dma_of_probe()
1395 edmac->chan.device = dma_dev; in ep93xx_dma_of_probe()
1396 edmac->regs = devm_platform_ioremap_resource(pdev, i); in ep93xx_dma_of_probe()
1397 if (IS_ERR(edmac->regs)) in ep93xx_dma_of_probe()
1398 return ERR_CAST(edmac->regs); in ep93xx_dma_of_probe()
1400 edmac->irq = fwnode_irq_get(dev_fwnode(dev), i); in ep93xx_dma_of_probe()
1401 if (edmac->irq < 0) in ep93xx_dma_of_probe()
1402 return ERR_PTR(edmac->irq); in ep93xx_dma_of_probe()
1404 edmac->edma = edma; in ep93xx_dma_of_probe()
1406 if (edma->m2m) in ep93xx_dma_of_probe()
1409 len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i); in ep93xx_dma_of_probe()
1411 return ERR_PTR(-ENOBUFS); in ep93xx_dma_of_probe()
1413 edmac->clk = devm_clk_get(dev, dma_clk_name); in ep93xx_dma_of_probe()
1414 if (IS_ERR(edmac->clk)) { in ep93xx_dma_of_probe()
1415 dev_err_probe(dev, PTR_ERR(edmac->clk), in ep93xx_dma_of_probe()
1417 return ERR_CAST(edmac->clk); in ep93xx_dma_of_probe()
1420 spin_lock_init(&edmac->lock); in ep93xx_dma_of_probe()
1421 INIT_LIST_HEAD(&edmac->active); in ep93xx_dma_of_probe()
1422 INIT_LIST_HEAD(&edmac->queue); in ep93xx_dma_of_probe()
1423 INIT_LIST_HEAD(&edmac->free_list); in ep93xx_dma_of_probe()
1424 tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet); in ep93xx_dma_of_probe()
1426 list_add_tail(&edmac->chan.device_node, in ep93xx_dma_of_probe()
1427 &dma_dev->channels); in ep93xx_dma_of_probe()
1438 if (cfg->dir != ep93xx_dma_chan_direction(chan)) in ep93xx_m2p_dma_filter()
1441 echan->dma_cfg = *cfg; in ep93xx_m2p_dma_filter()
1448 struct ep93xx_dma_engine *edma = ofdma->of_dma_data; in ep93xx_m2p_dma_of_xlate()
1449 dma_cap_mask_t mask = edma->dma_dev.cap_mask; in ep93xx_m2p_dma_of_xlate()
1451 u8 port = dma_spec->args[0]; in ep93xx_m2p_dma_of_xlate()
1452 u8 direction = dma_spec->args[1]; in ep93xx_m2p_dma_of_xlate()
1463 return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node); in ep93xx_m2p_dma_of_xlate()
1471 echan->dma_cfg = *cfg; in ep93xx_m2m_dma_filter()
1479 struct ep93xx_dma_engine *edma = ofdma->of_dma_data; in ep93xx_m2m_dma_of_xlate()
1480 dma_cap_mask_t mask = edma->dma_dev.cap_mask; in ep93xx_m2m_dma_of_xlate()
1482 u8 port = dma_spec->args[0]; in ep93xx_m2m_dma_of_xlate()
1483 u8 direction = dma_spec->args[1]; in ep93xx_m2m_dma_of_xlate()
1499 return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node); in ep93xx_m2m_dma_of_xlate()
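
The two dma-spec cells consumed above (port in args[0], direction in args[1]) come from the client's device tree node; the client driver itself only asks for the channel by name. Sketch, with "tx" as a hypothetical dma-names entry:

#include <linux/dmaengine.h>

static struct dma_chan *my_get_tx_chan(struct device *dev)
{
	/* Resolved via ep93xx_m2p_dma_of_xlate()/ep93xx_m2m_dma_of_xlate(). */
	return dma_request_chan(dev, "tx");	/* returns ERR_PTR() on failure */
}
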
1512 dma_dev = &edma->dma_dev; in ep93xx_dma_probe()
1514 dma_cap_zero(dma_dev->cap_mask); in ep93xx_dma_probe()
1515 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); in ep93xx_dma_probe()
1516 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); in ep93xx_dma_probe()
1518 dma_dev->dev = &pdev->dev; in ep93xx_dma_probe()
1519 dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; in ep93xx_dma_probe()
1520 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; in ep93xx_dma_probe()
1521 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; in ep93xx_dma_probe()
1522 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; in ep93xx_dma_probe()
1523 dma_dev->device_config = ep93xx_dma_slave_config; in ep93xx_dma_probe()
1524 dma_dev->device_synchronize = ep93xx_dma_synchronize; in ep93xx_dma_probe()
1525 dma_dev->device_terminate_all = ep93xx_dma_terminate_all; in ep93xx_dma_probe()
1526 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; in ep93xx_dma_probe()
1527 dma_dev->device_tx_status = ep93xx_dma_tx_status; in ep93xx_dma_probe()
1529 dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); in ep93xx_dma_probe()
1531 if (edma->m2m) { in ep93xx_dma_probe()
1532 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in ep93xx_dma_probe()
1533 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; in ep93xx_dma_probe()
1535 edma->hw_setup = m2m_hw_setup; in ep93xx_dma_probe()
1536 edma->hw_shutdown = m2m_hw_shutdown; in ep93xx_dma_probe()
1537 edma->hw_submit = m2m_hw_submit; in ep93xx_dma_probe()
1538 edma->hw_interrupt = m2m_hw_interrupt; in ep93xx_dma_probe()
1540 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); in ep93xx_dma_probe()
1542 edma->hw_synchronize = m2p_hw_synchronize; in ep93xx_dma_probe()
1543 edma->hw_setup = m2p_hw_setup; in ep93xx_dma_probe()
1544 edma->hw_shutdown = m2p_hw_shutdown; in ep93xx_dma_probe()
1545 edma->hw_submit = m2p_hw_submit; in ep93xx_dma_probe()
1546 edma->hw_interrupt = m2p_hw_interrupt; in ep93xx_dma_probe()
1553 if (edma->m2m) { in ep93xx_dma_probe()
1554 ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate, in ep93xx_dma_probe()
1557 ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate, in ep93xx_dma_probe()
1563 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P"); in ep93xx_dma_probe()
1584 { .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
1585 { .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
1591 { "ep93xx-dma-m2p", 0 },
1592 { "ep93xx-dma-m2m", 1 },
1598 .name = "ep93xx-dma",
1608 MODULE_DESCRIPTION("EP93xx DMA driver");