Lines Matching +full:mixed +full:-burst in drivers/dma/nbpfaxi.c (Renesas nbpfaxi DMA engine driver)
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
10 #include <linux/dma-mapping.h>
22 #include <dt-bindings/dma/nbpfaxi.h>
104 * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
108 * allocated from coherent memory - one per SG segment
111 * together with link descriptors as mixed (DMA / CPU) objects, or (b)
115 * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
142 * struct nbpf_desc - DMA transfer descriptor
161 #define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \
175 * struct nbpf_channel - one DMAC channel
184 * @slave_src_burst: maximum source slave burst size in bytes
187 * @slave_dst_burst: maximum destination slave burst size in bytes
189 * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
196 * @done: list of completed descriptors, waiting post-processing
197 * @desc_page: list of additionally allocated descriptor pages - if any
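The three descriptor levels sketched in the comment above are easier to follow with the data structures side by side. A simplified reconstruction, limited to the fields that actually appear in the lines quoted in this listing (the real definitions carry more members and full kernel-doc, and may order them differently):

        /* hardware link descriptor, read by the DMAC itself */
        struct nbpf_link_reg {
                u32 header;
                u32 src_addr;
                u32 dst_addr;
                u32 transaction_size;
                u32 config;
                u32 next;                       /* DMA address of the next link descriptor */
        };

        /* CPU-side wrapper around one hardware link descriptor */
        struct nbpf_link_desc {
                struct nbpf_link_reg *hwdesc;
                dma_addr_t hwdesc_dma_addr;     /* what gets written to NBPF_CHAN_NXLA */
                struct nbpf_desc *desc;
                struct list_head node;          /* chan->free_links or desc->sg */
        };

        /* high-level descriptor: one per prepared transfer */
        struct nbpf_desc {
                struct dma_async_tx_descriptor async_tx;
                bool user_wait;
                size_t length;
                struct nbpf_channel *chan;
                struct list_head sg;            /* chain of nbpf_link_desc */
                struct list_head node;          /* on free / queued / active / done */
        };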
295 * we try to separate the hardware-specific part from the (largely) generic
302 /* Hardware-specific part */
307 u32 data = ioread32(chan->base + offset); in nbpf_chan_read()
308 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_read()
309 __func__, chan->base, offset, data); in nbpf_chan_read()
316 iowrite32(data, chan->base + offset); in nbpf_chan_write()
317 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_write()
318 __func__, chan->base, offset, data); in nbpf_chan_write()
324 u32 data = ioread32(nbpf->base + offset); in nbpf_read()
325 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_read()
326 __func__, nbpf->base, offset, data); in nbpf_read()
333 iowrite32(data, nbpf->base + offset); in nbpf_write()
334 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_write()
335 __func__, nbpf->base, offset, data); in nbpf_write()
345 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); in nbpf_status_get()
347 return status & BIT(chan - chan->nbpf->chan); in nbpf_status_get()
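The bit test just above relies on a small idiom: chan points into the nbpf->chan[] array (cf. nbpf_chan_probe() further down, which initialises nbpf->chan + n), so the pointer difference is the channel index and each channel inspects exactly one per-channel bit of NBPF_DSTAT_END.

        /* e.g. the third channel: chan == &nbpf->chan[2], so it tests BIT(2) */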
362 return nbpf->chan + __ffs(error); in nbpf_error_get_channel()
373 for (i = 1000; i; i--) { in nbpf_error_clear()
381 dev_err(chan->dma_chan.device->dev, in nbpf_error_clear()
389 struct nbpf_channel *chan = desc->chan; in nbpf_start()
390 struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); in nbpf_start()
392 nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); in nbpf_start()
394 chan->paused = false; in nbpf_start()
396 /* Software trigger MEMCPY - only MEMCPY uses the block mode */ in nbpf_start()
397 if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) in nbpf_start()
400 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, in nbpf_start()
408 chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | in nbpf_chan_prepare()
409 (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | in nbpf_chan_prepare()
410 (chan->flags & NBPF_SLAVE_RQ_LEVEL ? in nbpf_chan_prepare()
412 chan->terminal; in nbpf_chan_prepare()
418 chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; in nbpf_chan_prepare_default()
419 chan->terminal = 0; in nbpf_chan_prepare_default()
420 chan->flags = 0; in nbpf_chan_prepare_default()
428 * per-transfer configuration will be loaded from transfer descriptors. in nbpf_chan_configure()
430 nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); in nbpf_chan_configure()
436 int max_burst = nbpf->config->buffer_size * 8; in nbpf_xfer_ds()
438 if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) { in nbpf_xfer_ds()
441 max_burst = min_not_zero(nbpf->max_burst_mem_read, in nbpf_xfer_ds()
442 nbpf->max_burst_mem_write); in nbpf_xfer_ds()
445 if (nbpf->max_burst_mem_read) in nbpf_xfer_ds()
446 max_burst = nbpf->max_burst_mem_read; in nbpf_xfer_ds()
449 if (nbpf->max_burst_mem_write) in nbpf_xfer_ds()
450 max_burst = nbpf->max_burst_mem_write; in nbpf_xfer_ds()
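Stitching the fragments of nbpf_xfer_ds() back together, the memory-side burst limit is chosen by transfer direction. A condensed sketch (the helper name is mine, and the real function additionally folds the transfer size into the value it returns):

        static int nbpf_max_burst_sketch(struct nbpf_device *nbpf,
                                         enum dma_transfer_direction direction)
        {
                /* default: limited by the hardware buffer size */
                int max_burst = nbpf->config->buffer_size * 8;

                switch (direction) {
                case DMA_MEM_TO_MEM:    /* both ends are RAM: take the stricter DT limit */
                        if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write)
                                max_burst = min_not_zero(nbpf->max_burst_mem_read,
                                                         nbpf->max_burst_mem_write);
                        break;
                case DMA_MEM_TO_DEV:    /* RAM is the read side */
                        if (nbpf->max_burst_mem_read)
                                max_burst = nbpf->max_burst_mem_read;
                        break;
                case DMA_DEV_TO_MEM:    /* RAM is the write side */
                        if (nbpf->max_burst_mem_write)
                                max_burst = nbpf->max_burst_mem_write;
                        break;
                default:
                        break;
                }
                return max_burst;
        }

The max_burst_mem_read / max_burst_mem_write values are read from the optional max-burst-mem-read / max-burst-mem-write DT properties in nbpf_probe() further down.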
463 enum dma_slave_buswidth width, u32 burst) in nbpf_xfer_size() argument
467 if (!burst) in nbpf_xfer_size()
468 burst = 1; in nbpf_xfer_size()
472 size = 8 * burst; in nbpf_xfer_size()
476 size = 4 * burst; in nbpf_xfer_size()
480 size = 2 * burst; in nbpf_xfer_size()
487 size = burst; in nbpf_xfer_size()
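The switch above converts the dmaengine bus-width enum into bytes and scales by the burst count, with a missing burst defaulting to 1 and the default case treating the width as a single byte. Two worked examples:

        /* DMA_SLAVE_BUSWIDTH_4_BYTES, maxburst = 16  ->  4 * 16 = 64 bytes */
        /* DMA_SLAVE_BUSWIDTH_2_BYTES, maxburst = 0   ->  2 *  1 =  2 bytes */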
498 * a timeout, if no more data arrives - receive what's already there. We want to
501 * to recognise such slaves. We use a data-width check to distinguish between
509 struct nbpf_link_reg *hwdesc = ldesc->hwdesc; in nbpf_prep_one()
510 struct nbpf_desc *desc = ldesc->desc; in nbpf_prep_one()
511 struct nbpf_channel *chan = desc->chan; in nbpf_prep_one()
512 struct device *dev = chan->dma_chan.device->dev; in nbpf_prep_one()
516 hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | in nbpf_prep_one()
519 hwdesc->src_addr = src; in nbpf_prep_one()
520 hwdesc->dst_addr = dst; in nbpf_prep_one()
521 hwdesc->transaction_size = size; in nbpf_prep_one()
532 * e.g. with serial drivers like amba-pl011.c. For reception it sets up in nbpf_prep_one()
535 * in the Rx FIFO. For this to work with the RAM side using burst in nbpf_prep_one()
539 mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction); in nbpf_prep_one()
543 can_burst = chan->slave_src_width >= 3; in nbpf_prep_one()
545 chan->slave_src_burst : chan->slave_src_width); in nbpf_prep_one()
550 if (mem_xfer > chan->slave_src_burst && !can_burst) in nbpf_prep_one()
551 mem_xfer = chan->slave_src_burst; in nbpf_prep_one()
552 /* Device-to-RAM DMA is unreliable without REQD set */ in nbpf_prep_one()
553 hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | in nbpf_prep_one()
559 slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? in nbpf_prep_one()
560 chan->slave_dst_burst : chan->slave_dst_width); in nbpf_prep_one()
561 hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | in nbpf_prep_one()
566 hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | in nbpf_prep_one()
572 return -EINVAL; in nbpf_prep_one()
575 hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | in nbpf_prep_one()
578 dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n", in nbpf_prep_one()
579 __func__, &ldesc->hwdesc_dma_addr, hwdesc->header, in nbpf_prep_one()
580 hwdesc->config, size, &src, &dst); in nbpf_prep_one()
582 dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc), in nbpf_prep_one()
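Two of the bits OR'd into hwdesc->config above carry most of the behavioural weight here, as far as the quoted lines show:

        /* NBPF_CHAN_CFG_REQD: needed for device-to-RAM slave transfers, which the
         *                     comment above calls unreliable without it.
         * NBPF_CHAN_CFG_DEM:  set on every link descriptor except the last one of a
         *                     transfer ("last ? 0 : NBPF_CHAN_CFG_DEM"), apparently
         *                     masking the end interrupt so that only the final
         *                     segment completes the descriptor in nbpf_chan_irq(). */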
606 dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); in nbpf_issue_pending()
608 spin_lock_irqsave(&chan->lock, flags); in nbpf_issue_pending()
609 if (list_empty(&chan->queued)) in nbpf_issue_pending()
612 list_splice_tail_init(&chan->queued, &chan->active); in nbpf_issue_pending()
614 if (!chan->running) { in nbpf_issue_pending()
615 struct nbpf_desc *desc = list_first_entry(&chan->active, in nbpf_issue_pending()
618 chan->running = desc; in nbpf_issue_pending()
622 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_issue_pending()
635 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_status()
636 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
639 state->residue = nbpf_bytes_left(chan); in nbpf_tx_status()
640 dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__, in nbpf_tx_status()
641 state->residue); in nbpf_tx_status()
646 list_for_each_entry(desc, &chan->active, node) in nbpf_tx_status()
647 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
653 list_for_each_entry(desc, &chan->queued, node) in nbpf_tx_status()
654 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
660 state->residue = found ? desc->length : 0; in nbpf_tx_status()
663 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_status()
666 if (chan->paused) in nbpf_tx_status()
675 struct nbpf_channel *chan = desc->chan; in nbpf_tx_submit()
679 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_submit()
681 list_add_tail(&desc->node, &chan->queued); in nbpf_tx_submit()
682 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_submit()
684 dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); in nbpf_tx_submit()
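Both nbpf_issue_pending() and nbpf_tx_submit() are only reached through the generic dmaengine client API. A hypothetical client sequence (function and variable names here are placeholders, not part of this driver) looks roughly like this:

        /* Hypothetical dmaengine client (not part of this driver). */
        static int start_rx_sketch(struct dma_chan *dchan, struct scatterlist *sgl,
                                   unsigned int sg_len)
        {
                struct dma_async_tx_descriptor *tx;
                struct dma_tx_state state;
                dma_cookie_t cookie;

                tx = dmaengine_prep_slave_sg(dchan, sgl, sg_len, DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT);
                if (!tx)
                        return -EIO;

                cookie = dmaengine_submit(tx);          /* -> nbpf_tx_submit() */
                dma_async_issue_pending(dchan);         /* -> nbpf_issue_pending() */

                /* completion and residue can later be queried via nbpf_tx_status() */
                dmaengine_tx_status(dchan, cookie, &state);
                return 0;
        }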
691 struct dma_chan *dchan = &chan->dma_chan; in nbpf_desc_page_alloc()
699 struct device *dev = dchan->device->dev; in nbpf_desc_page_alloc()
702 return -ENOMEM; in nbpf_desc_page_alloc()
707 for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; in nbpf_desc_page_alloc()
708 i < ARRAY_SIZE(dpage->ldesc); in nbpf_desc_page_alloc()
710 ldesc->hwdesc = hwdesc; in nbpf_desc_page_alloc()
711 list_add_tail(&ldesc->node, &lhead); in nbpf_desc_page_alloc()
712 ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, in nbpf_desc_page_alloc()
714 if (dma_mapping_error(dchan->device->dev, in nbpf_desc_page_alloc()
715 ldesc->hwdesc_dma_addr)) in nbpf_desc_page_alloc()
719 hwdesc, &ldesc->hwdesc_dma_addr); in nbpf_desc_page_alloc()
722 for (i = 0, desc = dpage->desc; in nbpf_desc_page_alloc()
723 i < ARRAY_SIZE(dpage->desc); in nbpf_desc_page_alloc()
725 dma_async_tx_descriptor_init(&desc->async_tx, dchan); in nbpf_desc_page_alloc()
726 desc->async_tx.tx_submit = nbpf_tx_submit; in nbpf_desc_page_alloc()
727 desc->chan = chan; in nbpf_desc_page_alloc()
728 INIT_LIST_HEAD(&desc->sg); in nbpf_desc_page_alloc()
729 list_add_tail(&desc->node, &head); in nbpf_desc_page_alloc()
736 spin_lock_irq(&chan->lock); in nbpf_desc_page_alloc()
737 list_splice_tail(&lhead, &chan->free_links); in nbpf_desc_page_alloc()
738 list_splice_tail(&head, &chan->free); in nbpf_desc_page_alloc()
739 list_add(&dpage->node, &chan->desc_page); in nbpf_desc_page_alloc()
740 spin_unlock_irq(&chan->lock); in nbpf_desc_page_alloc()
742 return ARRAY_SIZE(dpage->desc); in nbpf_desc_page_alloc()
745 while (i--) { in nbpf_desc_page_alloc()
746 ldesc--; hwdesc--; in nbpf_desc_page_alloc()
748 dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, in nbpf_desc_page_alloc()
752 return -ENOMEM; in nbpf_desc_page_alloc()
757 struct nbpf_channel *chan = desc->chan; in nbpf_desc_put()
761 spin_lock_irqsave(&chan->lock, flags); in nbpf_desc_put()
762 list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) in nbpf_desc_put()
763 list_move(&ldesc->node, &chan->free_links); in nbpf_desc_put()
765 list_add(&desc->node, &chan->free); in nbpf_desc_put()
766 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_desc_put()
775 spin_lock_irqsave(&chan->lock, flags); in nbpf_scan_acked()
776 list_for_each_entry_safe(desc, tmp, &chan->done, node) in nbpf_scan_acked()
777 if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { in nbpf_scan_acked()
778 list_move(&desc->node, &head); in nbpf_scan_acked()
779 desc->user_wait = false; in nbpf_scan_acked()
781 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_scan_acked()
784 list_del(&desc->node); in nbpf_scan_acked()
791 * before we re-acquire the lock, buffers can already have been taken, so we have to
792 * re-check after re-acquiring the lock and possibly retry if buffers are gone
802 spin_lock_irq(&chan->lock); in nbpf_desc_get()
807 if (list_empty(&chan->free)) { in nbpf_desc_get()
809 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
813 spin_lock_irq(&chan->lock); in nbpf_desc_get()
816 desc = list_first_entry(&chan->free, struct nbpf_desc, node); in nbpf_desc_get()
817 list_del(&desc->node); in nbpf_desc_get()
820 if (list_empty(&chan->free_links)) { in nbpf_desc_get()
822 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
828 spin_lock_irq(&chan->lock); in nbpf_desc_get()
832 ldesc = list_first_entry(&chan->free_links, in nbpf_desc_get()
834 ldesc->desc = desc; in nbpf_desc_get()
836 prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; in nbpf_desc_get()
839 list_move_tail(&ldesc->node, &desc->sg); in nbpf_desc_get()
845 prev->hwdesc->next = 0; in nbpf_desc_get()
847 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
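The comment quoted before nbpf_desc_get() ("re-check after re-acquiring the lock and possibly retry") describes the loop whose pieces appear above. Condensed and slightly simplified, the pattern is the usual drop-lock / allocate / relock / recheck dance:

        /* Outline of nbpf_desc_get(), simplified to the descriptor list only. */
        static struct nbpf_desc *desc_get_outline(struct nbpf_channel *chan)
        {
                struct nbpf_desc *desc;

                spin_lock_irq(&chan->lock);
                while (list_empty(&chan->free)) {
                        /* cannot allocate a descriptor page under a spinlock */
                        spin_unlock_irq(&chan->lock);
                        if (nbpf_desc_page_alloc(chan) < 0)
                                return NULL;
                        /* others may have taken the new buffers meanwhile: re-check */
                        spin_lock_irq(&chan->lock);
                }
                desc = list_first_entry(&chan->free, struct nbpf_desc, node);
                list_del(&desc->node);
                /* the real function repeats the same dance for chan->free_links and
                 * chains the per-segment hardware descriptors via prev->hwdesc->next */
                spin_unlock_irq(&chan->lock);

                return desc;
        }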
858 spin_lock_irqsave(&chan->lock, flags); in nbpf_chan_idle()
860 list_splice_init(&chan->done, &head); in nbpf_chan_idle()
861 list_splice_init(&chan->active, &head); in nbpf_chan_idle()
862 list_splice_init(&chan->queued, &head); in nbpf_chan_idle()
864 chan->running = NULL; in nbpf_chan_idle()
866 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_chan_idle()
869 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", in nbpf_chan_idle()
870 __func__, desc, desc->async_tx.cookie); in nbpf_chan_idle()
871 list_del(&desc->node); in nbpf_chan_idle()
880 dev_dbg(dchan->device->dev, "Entry %s\n", __func__); in nbpf_pause()
882 chan->paused = true; in nbpf_pause()
894 dev_dbg(dchan->device->dev, "Entry %s\n", __func__); in nbpf_terminate_all()
895 dev_dbg(dchan->device->dev, "Terminating\n"); in nbpf_terminate_all()
908 dev_dbg(dchan->device->dev, "Entry %s\n", __func__); in nbpf_config()
911 * We could check config->slave_id to match chan->terminal here, in nbpf_config()
916 chan->slave_dst_addr = config->dst_addr; in nbpf_config()
917 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
918 config->dst_addr_width, 1); in nbpf_config()
919 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
920 config->dst_addr_width, in nbpf_config()
921 config->dst_maxburst); in nbpf_config()
922 chan->slave_src_addr = config->src_addr; in nbpf_config()
923 chan->slave_src_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
924 config->src_addr_width, 1); in nbpf_config()
925 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
926 config->src_addr_width, in nbpf_config()
927 config->src_maxburst); in nbpf_config()
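The byte values cached here come straight from the client's struct dma_slave_config. A hypothetical peripheral driver (names and values are placeholders for illustration) hands them over like this before preparing transfers:

        /* Hypothetical client: dchan and fifo_addr are placeholders. */
        static int client_config_sketch(struct dma_chan *dchan, dma_addr_t fifo_addr)
        {
                struct dma_slave_config cfg = {
                        .src_addr       = fifo_addr,    /* device FIFO bus address */
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                        .src_maxburst   = 16,   /* 16 beats of 4 bytes = 64 bytes per burst */
                };

                return dmaengine_slave_config(dchan, &cfg);     /* lands in nbpf_config() */
        }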
968 desc->async_tx.flags = flags; in nbpf_prep_sg()
969 desc->async_tx.cookie = -EBUSY; in nbpf_prep_sg()
970 desc->user_wait = false; in nbpf_prep_sg()
976 list_for_each_entry(ldesc, &desc->sg, node) { in nbpf_prep_sg()
981 i == len - 1); in nbpf_prep_sg()
995 desc->length = data_len; in nbpf_prep_sg()
998 return &desc->async_tx; in nbpf_prep_sg()
1018 dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n", in nbpf_prep_memcpy()
1032 dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); in nbpf_prep_slave_sg()
1038 sg_dma_address(&slave_sg) = chan->slave_dst_addr; in nbpf_prep_slave_sg()
1043 sg_dma_address(&slave_sg) = chan->slave_src_addr; in nbpf_prep_slave_sg()
1057 INIT_LIST_HEAD(&chan->free); in nbpf_alloc_chan_resources()
1058 INIT_LIST_HEAD(&chan->free_links); in nbpf_alloc_chan_resources()
1059 INIT_LIST_HEAD(&chan->queued); in nbpf_alloc_chan_resources()
1060 INIT_LIST_HEAD(&chan->active); in nbpf_alloc_chan_resources()
1061 INIT_LIST_HEAD(&chan->done); in nbpf_alloc_chan_resources()
1067 dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__, in nbpf_alloc_chan_resources()
1068 chan->terminal); in nbpf_alloc_chan_resources()
1080 dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); in nbpf_free_chan_resources()
1084 /* Clean up in case the channel is re-used for MEMCPY after slave DMA */ in nbpf_free_chan_resources()
1087 list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { in nbpf_free_chan_resources()
1090 list_del(&dpage->node); in nbpf_free_chan_resources()
1091 for (i = 0, ldesc = dpage->ldesc; in nbpf_free_chan_resources()
1092 i < ARRAY_SIZE(dpage->ldesc); in nbpf_free_chan_resources()
1094 dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, in nbpf_free_chan_resources()
1095 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); in nbpf_free_chan_resources()
1103 struct nbpf_device *nbpf = ofdma->of_dma_data; in nbpf_of_xlate()
1107 if (dma_spec->args_count != 2) in nbpf_of_xlate()
1110 dchan = dma_get_any_slave_channel(&nbpf->dma_dev); in nbpf_of_xlate()
1114 dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__, in nbpf_of_xlate()
1115 dma_spec->np); in nbpf_of_xlate()
1119 chan->terminal = dma_spec->args[0]; in nbpf_of_xlate()
1120 chan->flags = dma_spec->args[1]; in nbpf_of_xlate()
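The two #dma-cells consumed here map directly onto per-channel state, with the flag values coming from the header included at the top of the file:

        /* dma_spec->args[0] -> chan->terminal : DMA request terminal number
         * dma_spec->args[1] -> chan->flags    : NBPF_SLAVE_RQ_HIGH / _LOW / _LEVEL
         *                                       bits from <dt-bindings/dma/nbpfaxi.h>,
         *                                       folded into dmarq_cfg by
         *                                       nbpf_chan_prepare() */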
1134 while (!list_empty(&chan->done)) { in nbpf_chan_tasklet()
1137 spin_lock_irq(&chan->lock); in nbpf_chan_tasklet()
1139 list_for_each_entry_safe(desc, tmp, &chan->done, node) { in nbpf_chan_tasklet()
1140 if (!desc->user_wait) { in nbpf_chan_tasklet()
1144 } else if (async_tx_test_ack(&desc->async_tx)) { in nbpf_chan_tasklet()
1149 list_del(&desc->node); in nbpf_chan_tasklet()
1150 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1162 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1166 dma_cookie_complete(&desc->async_tx); in nbpf_chan_tasklet()
1172 if (async_tx_test_ack(&desc->async_tx)) { in nbpf_chan_tasklet()
1173 list_del(&desc->node); in nbpf_chan_tasklet()
1176 desc->user_wait = true; in nbpf_chan_tasklet()
1180 dmaengine_desc_get_callback(&desc->async_tx, &cb); in nbpf_chan_tasklet()
1183 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
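The tasklet's control flow is hard to reassemble from matching lines alone; in rough outline (reconstructed, so the fine detail may differ):

        /* For every descriptor on chan->done:
         *  - user_wait set and the client has since acked it: recycle it via
         *    nbpf_desc_put() and loop again;
         *  - newly completed (user_wait not yet set): complete the cookie, fetch the
         *    client callback, drop chan->lock, run the callback, then either free
         *    the descriptor (if it is already acked) or mark it user_wait so that
         *    nbpf_scan_acked() / nbpf_tx_status() can reap it later.
         */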
1205 dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); in nbpf_chan_irq()
1207 spin_lock(&chan->lock); in nbpf_chan_irq()
1208 desc = chan->running; in nbpf_chan_irq()
1217 list_move_tail(&desc->node, &chan->done); in nbpf_chan_irq()
1218 chan->running = NULL; in nbpf_chan_irq()
1220 if (!list_empty(&chan->active)) { in nbpf_chan_irq()
1221 desc = list_first_entry(&chan->active, in nbpf_chan_irq()
1224 chan->running = desc; in nbpf_chan_irq()
1228 spin_unlock(&chan->lock); in nbpf_chan_irq()
1231 tasklet_schedule(&chan->tasklet); in nbpf_chan_irq()
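Seen together with the tasklet and the list handling earlier in the file, the per-channel lists form a simple descriptor state machine, roughly:

        /*
         *   free --(nbpf_desc_get + tx_submit)--> queued
         *   queued --(issue_pending)--> active --(nbpf_start)--> running
         *   running --(END IRQ, nbpf_chan_irq)--> done
         *   done --(tasklet / user ack, nbpf_desc_put)--> free
         *
         * nbpf_chan_idle() short-circuits the machine by splicing queued, active
         * and done onto one list and force-freeing every descriptor on it.
         */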
1241 dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq); in nbpf_err_irq()
1259 struct dma_device *dma_dev = &nbpf->dma_dev; in nbpf_chan_probe()
1260 struct nbpf_channel *chan = nbpf->chan + n; in nbpf_chan_probe()
1263 chan->nbpf = nbpf; in nbpf_chan_probe()
1264 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; in nbpf_chan_probe()
1265 INIT_LIST_HEAD(&chan->desc_page); in nbpf_chan_probe()
1266 spin_lock_init(&chan->lock); in nbpf_chan_probe()
1267 chan->dma_chan.device = dma_dev; in nbpf_chan_probe()
1268 dma_cookie_init(&chan->dma_chan); in nbpf_chan_probe()
1271 dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); in nbpf_chan_probe()
1273 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); in nbpf_chan_probe()
1275 tasklet_setup(&chan->tasklet, nbpf_chan_tasklet); in nbpf_chan_probe()
1276 ret = devm_request_irq(dma_dev->dev, chan->irq, in nbpf_chan_probe()
1278 chan->name, chan); in nbpf_chan_probe()
1283 list_add_tail(&chan->dma_chan.device_node, in nbpf_chan_probe()
1284 &dma_dev->channels); in nbpf_chan_probe()
1305 struct device *dev = &pdev->dev; in nbpf_probe()
1306 struct device_node *np = dev->of_node; in nbpf_probe()
1319 return -ENODEV; in nbpf_probe()
1322 num_channels = cfg->num_channels; in nbpf_probe()
1327 return -ENOMEM; in nbpf_probe()
1329 dma_dev = &nbpf->dma_dev; in nbpf_probe()
1330 dma_dev->dev = dev; in nbpf_probe()
1332 nbpf->base = devm_platform_ioremap_resource(pdev, 0); in nbpf_probe()
1333 if (IS_ERR(nbpf->base)) in nbpf_probe()
1334 return PTR_ERR(nbpf->base); in nbpf_probe()
1336 nbpf->clk = devm_clk_get(dev, NULL); in nbpf_probe()
1337 if (IS_ERR(nbpf->clk)) in nbpf_probe()
1338 return PTR_ERR(nbpf->clk); in nbpf_probe()
1340 of_property_read_u32(np, "max-burst-mem-read", in nbpf_probe()
1341 &nbpf->max_burst_mem_read); in nbpf_probe()
1342 of_property_read_u32(np, "max-burst-mem-write", in nbpf_probe()
1343 &nbpf->max_burst_mem_write); in nbpf_probe()
1345 nbpf->config = cfg; in nbpf_probe()
1349 if (irq < 0 && irq != -ENXIO) in nbpf_probe()
1362 return -ENXIO; in nbpf_probe()
1368 nbpf->chan[i].irq = irqbuf[0]; in nbpf_probe()
1377 for (i = 0, chan = nbpf->chan; i < num_channels; in nbpf_probe()
1383 return -EINVAL; in nbpf_probe()
1384 chan->irq = irqbuf[i]; in nbpf_probe()
1394 nbpf->chan[i].irq = irq; in nbpf_probe()
1402 nbpf->eirq = eirq; in nbpf_probe()
1404 INIT_LIST_HEAD(&dma_dev->channels); in nbpf_probe()
1413 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in nbpf_probe()
1414 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); in nbpf_probe()
1415 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); in nbpf_probe()
1418 dma_dev->device_alloc_chan_resources in nbpf_probe()
1420 dma_dev->device_free_chan_resources = nbpf_free_chan_resources; in nbpf_probe()
1421 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; in nbpf_probe()
1422 dma_dev->device_tx_status = nbpf_tx_status; in nbpf_probe()
1423 dma_dev->device_issue_pending = nbpf_issue_pending; in nbpf_probe()
1428 * dma_dev->copy_align = 4; in nbpf_probe()
1434 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; in nbpf_probe()
1435 dma_dev->device_config = nbpf_config; in nbpf_probe()
1436 dma_dev->device_pause = nbpf_pause; in nbpf_probe()
1437 dma_dev->device_terminate_all = nbpf_terminate_all; in nbpf_probe()
1439 dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS; in nbpf_probe()
1440 dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS; in nbpf_probe()
1441 dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in nbpf_probe()
1445 ret = clk_prepare_enable(nbpf->clk); in nbpf_probe()
1464 clk_disable_unprepare(nbpf->clk); in nbpf_probe()
1474 devm_free_irq(&pdev->dev, nbpf->eirq, nbpf); in nbpf_remove()
1476 for (i = 0; i < nbpf->config->num_channels; i++) { in nbpf_remove()
1477 struct nbpf_channel *chan = nbpf->chan + i; in nbpf_remove()
1479 devm_free_irq(&pdev->dev, chan->irq, chan); in nbpf_remove()
1481 tasklet_kill(&chan->tasklet); in nbpf_remove()
1484 of_dma_controller_free(pdev->dev.of_node); in nbpf_remove()
1485 dma_async_device_unregister(&nbpf->dma_dev); in nbpf_remove()
1486 clk_disable_unprepare(nbpf->clk); in nbpf_remove()
1507 clk_disable_unprepare(nbpf->clk); in nbpf_runtime_suspend()
1514 return clk_prepare_enable(nbpf->clk); in nbpf_runtime_resume()
1524 .name = "dma-nbpf",