Lines Matching refs:cdd (drivers/dma/ti/cppi41.c). Each entry gives the source line number, the matching line, the enclosing function, and, where the tool can tell, the identifier's role (member, argument, local).

88 	struct cppi41_dd *cdd;  member
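
The member hit at line 88 is the back-pointer from each channel to its controller. Every other match below dereferences that controller state, so a rough reconstruction of struct cppi41_dd is a useful map of the listing. The fields come only from the dereferences shown here; field order, exact types, and the ALLOC_DECS_NUM bound are assumptions:

    struct cppi41_dd {
        struct dma_device ddev;              /* ddev.dev drives pm_runtime */
        void *qmgr_scratch;                  /* queue-manager linking RAM */
        dma_addr_t scratch_phys;
        struct cppi41_desc *cd;              /* shared descriptor pool */
        dma_addr_t descs_phys;
        struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
        void __iomem *ctrl_mem;              /* DMA_TDFDQ, DMA_TX/RXGCR */
        void __iomem *sched_mem;             /* DMA_SCHED_CTRL/WORD */
        void __iomem *qmgr_mem;              /* QMGR_* registers */
        unsigned int irq;
        const struct chan_queues *queues_rx;
        const struct chan_queues *queues_tx;
        struct chan_queues td_queue;         /* teardown submit/complete */
        u16 first_completion_queue;
        u16 qmgr_num_pend;
        u32 n_chans;
        u32 first_td_desc;                   /* teardown descs follow chans */
        u32 dma_tdfdq;                       /* saved across suspend */
        struct list_head pending;            /* guarded by lock */
        spinlock_t lock;
        bool is_suspended;
    };
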
254 static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc) in desc_to_chan() argument
262 if (!((desc >= cdd->descs_phys) && in desc_to_chan()
263 (desc < (cdd->descs_phys + descs_size)))) { in desc_to_chan()
267 desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc); in desc_to_chan()
269 c = cdd->chan_busy[desc_num]; in desc_to_chan()
270 cdd->chan_busy[desc_num] = NULL; in desc_to_chan()
273 pm_runtime_put(cdd->ddev.dev); in desc_to_chan()
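
desc_to_chan() (lines 254-273) maps a descriptor's physical address back to the channel that owns it: range-check against the pool, divide by the descriptor size to get an index, consume the chan_busy[] slot, and drop the pm_runtime reference the push took. A standalone model of the mapping; the descriptor size and pool length are placeholders:

    #include <stddef.h>
    #include <stdint.h>

    #define DESC_SIZE 32u     /* assumed sizeof(struct cppi41_desc) */
    #define NUM_DESCS 256u    /* assumed pool size */

    struct chan;              /* stands in for struct cppi41_channel */

    static struct chan *busy[NUM_DESCS];

    static struct chan *desc_to_chan(uint32_t descs_phys, uint32_t desc)
    {
        struct chan *c;
        uint32_t idx;

        /* a descriptor outside our pool is not ours (lines 262-263) */
        if (desc < descs_phys ||
            desc >= descs_phys + NUM_DESCS * DESC_SIZE)
            return NULL;

        idx = (desc - descs_phys) / DESC_SIZE;    /* line 267 */
        c = busy[idx];
        busy[idx] = NULL;                         /* slot free again */
        return c;
    }
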
293 static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) in cppi41_pop_desc() argument
297 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); in cppi41_pop_desc()
304 struct cppi41_dd *cdd = data; in cppi41_irq() local
305 u16 first_completion_queue = cdd->first_completion_queue; in cppi41_irq()
306 u16 qmgr_num_pend = cdd->qmgr_num_pend; in cppi41_irq()
315 val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)); in cppi41_irq()
336 WARN_ON(cdd->is_suspended); in cppi41_irq()
341 desc = cppi41_pop_desc(cdd, q_num); in cppi41_irq()
342 c = desc_to_chan(cdd, desc); in cppi41_irq()
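
The interrupt handler (lines 304-342) walks the queue manager's pending bitmap: one 32-bit word per 32 queues, each set bit naming a completion queue that holds finished descriptors. It pops every such queue and resolves each descriptor to its channel via desc_to_chan(). A compilable model of the scan, with the register read stubbed:

    #include <stdint.h>
    #include <stdio.h>

    /* stub for cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)) */
    static uint32_t read_pend_word(unsigned int i)
    {
        return i == 2 ? 0x11u : 0;    /* pretend queues 64 and 68 fired */
    }

    static void scan_pending(unsigned int qmgr_num_pend)
    {
        for (unsigned int i = 0; i < qmgr_num_pend; i++) {
            uint32_t val = read_pend_word(i);    /* line 315 */

            while (val) {
                /* highest set bit, modelling the driver's __fls() */
                unsigned int q_num = 31 - (unsigned int)__builtin_clz(val);

                val &= ~(1u << q_num);
                q_num += 32 * i;    /* word index -> queue number */
                printf("pop queue %u\n", q_num);
                /* driver: desc = cppi41_pop_desc(cdd, q_num);
                 *         c = desc_to_chan(cdd, desc);  (lines 341-342) */
            }
        }
    }

    int main(void)
    {
        scan_pending(5);
        return 0;
    }
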
374 struct cppi41_dd *cdd = c->cdd; in cppi41_dma_alloc_chan_resources() local
377 error = pm_runtime_get_sync(cdd->ddev.dev); in cppi41_dma_alloc_chan_resources()
379 dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", in cppi41_dma_alloc_chan_resources()
381 pm_runtime_put_noidle(cdd->ddev.dev); in cppi41_dma_alloc_chan_resources()
393 pm_runtime_mark_last_busy(cdd->ddev.dev); in cppi41_dma_alloc_chan_resources()
394 pm_runtime_put_autosuspend(cdd->ddev.dev); in cppi41_dma_alloc_chan_resources()
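
Lines 377-394 are the runtime-PM idiom this driver uses throughout: pm_runtime_get_sync() raises the usage count even when it fails, so the error path must undo it with pm_runtime_put_noidle(); the success path idles lazily via mark_last_busy plus put_autosuspend. A minimal sketch of the pattern, assuming a kernel context:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int do_with_device_resumed(struct device *dev)
    {
        int error;

        error = pm_runtime_get_sync(dev);
        if (error < 0) {
            dev_err(dev, "pm runtime get: %i\n", error);
            /* the usage count was raised even on failure */
            pm_runtime_put_noidle(dev);
            return error;
        }

        /* ... touch the hardware ... */

        /* don't suspend immediately; let the autosuspend timer expire */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
    }
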
402 struct cppi41_dd *cdd = c->cdd; in cppi41_dma_free_chan_resources() local
405 error = pm_runtime_get_sync(cdd->ddev.dev); in cppi41_dma_free_chan_resources()
407 pm_runtime_put_noidle(cdd->ddev.dev); in cppi41_dma_free_chan_resources()
412 WARN_ON(!list_empty(&cdd->pending)); in cppi41_dma_free_chan_resources()
414 pm_runtime_mark_last_busy(cdd->ddev.dev); in cppi41_dma_free_chan_resources()
415 pm_runtime_put_autosuspend(cdd->ddev.dev); in cppi41_dma_free_chan_resources()
433 struct cppi41_dd *cdd = c->cdd; in push_desc_queue() local
463 pm_runtime_get(cdd->ddev.dev); in push_desc_queue()
466 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); in push_desc_queue()
467 WARN_ON(cdd->chan_busy[desc_num]); in push_desc_queue()
468 cdd->chan_busy[desc_num] = c; in push_desc_queue()
472 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); in push_desc_queue()
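
push_desc_queue() (lines 433-472) is the inverse of desc_to_chan(): it claims the chan_busy[] slot for the descriptor, then writes the queue manager's D register, which pushes on write just as cppi41_pop_desc()'s read pops. The word written carries both the pool-aligned physical address and, in the low bits that alignment leaves free, a descriptor-size code; the exact encoding is not in this listing and is assumed below:

    #include <assert.h>
    #include <stdint.h>

    #define DESC_SIZE 32u    /* assumed sizeof(struct cppi41_desc) */

    static uint32_t pack_queue_d_word(uint32_t desc_phys)
    {
        uint32_t size_code = (DESC_SIZE - 24) / 4;    /* assumed encoding */

        /* the pool is DESC_SIZE-aligned, so the low bits are free */
        assert((desc_phys & (DESC_SIZE - 1)) == 0);
        return desc_phys | size_code;
    }
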
480 static void cppi41_run_queue(struct cppi41_dd *cdd) in cppi41_run_queue() argument
484 list_for_each_entry_safe(c, _c, &cdd->pending, node) { in cppi41_run_queue()
493 struct cppi41_dd *cdd = c->cdd; in cppi41_dma_issue_pending() local
497 error = pm_runtime_get(cdd->ddev.dev); in cppi41_dma_issue_pending()
499 pm_runtime_put_noidle(cdd->ddev.dev); in cppi41_dma_issue_pending()
500 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", in cppi41_dma_issue_pending()
506 spin_lock_irqsave(&cdd->lock, flags); in cppi41_dma_issue_pending()
507 list_add_tail(&c->node, &cdd->pending); in cppi41_dma_issue_pending()
508 if (!cdd->is_suspended) in cppi41_dma_issue_pending()
509 cppi41_run_queue(cdd); in cppi41_dma_issue_pending()
510 spin_unlock_irqrestore(&cdd->lock, flags); in cppi41_dma_issue_pending()
512 pm_runtime_mark_last_busy(cdd->ddev.dev); in cppi41_dma_issue_pending()
513 pm_runtime_put_autosuspend(cdd->ddev.dev); in cppi41_dma_issue_pending()
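
cppi41_dma_issue_pending() (lines 493-513) shows the suspend gate around the hardware queue: submissions always land on cdd->pending under the spinlock, but the queue-manager write only happens while the controller is not runtime-suspended; cppi41_runtime_resume() at the end of this listing drains whatever accumulated in between. A sketch of the pattern, assuming kernel list and spinlock primitives:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct ctrl {
        spinlock_t lock;
        struct list_head pending;    /* guarded by lock */
        bool is_suspended;
    };

    struct xfer {
        struct list_head node;
    };

    static void hw_push(struct xfer *x)
    {
        /* write QMGR_QUEUE_D here, as push_desc_queue() does */
    }

    /* caller holds cd->lock */
    static void run_queue(struct ctrl *cd)
    {
        struct xfer *x, *tmp;

        list_for_each_entry_safe(x, tmp, &cd->pending, node) {
            hw_push(x);
            list_del(&x->node);
        }
    }

    static void issue_pending(struct ctrl *cd, struct xfer *x)
    {
        unsigned long flags;

        spin_lock_irqsave(&cd->lock, flags);
        list_add_tail(&x->node, &cd->pending);
        if (!cd->is_suspended)
            run_queue(cd);    /* else runtime resume drains it */
        spin_unlock_irqrestore(&cd->lock, flags);
    }
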
590 struct cppi41_dd *cdd = c->cdd; in cppi41_dma_prep_slave_sg() local
596 error = pm_runtime_get(cdd->ddev.dev); in cppi41_dma_prep_slave_sg()
598 pm_runtime_put_noidle(cdd->ddev.dev); in cppi41_dma_prep_slave_sg()
603 if (cdd->is_suspended) in cppi41_dma_prep_slave_sg()
630 pm_runtime_mark_last_busy(cdd->ddev.dev); in cppi41_dma_prep_slave_sg()
631 pm_runtime_put_autosuspend(cdd->ddev.dev); in cppi41_dma_prep_slave_sg()
644 struct cppi41_dd *cdd = c->cdd; in cppi41_tear_down_chan() local
650 td = cdd->cd; in cppi41_tear_down_chan()
651 td += cdd->first_td_desc; in cppi41_tear_down_chan()
653 td_desc_phys = cdd->descs_phys; in cppi41_tear_down_chan()
654 td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc); in cppi41_tear_down_chan()
662 cppi_writel(reg, cdd->qmgr_mem + in cppi41_tear_down_chan()
663 QMGR_QUEUE_D(cdd->td_queue.submit)); in cppi41_tear_down_chan()
669 reg |= cdd->td_queue.complete; in cppi41_tear_down_chan()
679 desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); in cppi41_tear_down_chan()
681 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); in cppi41_tear_down_chan()
717 desc_phys = cppi41_pop_desc(cdd, c->q_num); in cppi41_tear_down_chan()
719 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); in cppi41_tear_down_chan()
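
cppi41_tear_down_chan() (lines 644-719) runs the CPPI 4.1 teardown handshake: descriptors past first_td_desc are reserved for teardown, one is pushed onto td_queue.submit, the channel's GCR is told to tear down with td_queue.complete as the return queue, and the driver then polls both that queue and the channel's own completion queue until the teardown descriptor reappears. A standalone model of the handshake, with the queue hardware stubbed by a one-slot fake:

    #include <stdint.h>
    #include <stdio.h>

    /* one-slot stand-in for the queue manager */
    static uint32_t fake_queue;

    static void queue_push(unsigned int q, uint32_t desc)
    {
        fake_queue = desc;
    }

    static uint32_t queue_pop(unsigned int q)
    {
        uint32_t d = fake_queue;

        fake_queue = 0;
        return d;
    }

    static int tear_down(unsigned int submit_q, unsigned int complete_q,
                         unsigned int chan_q, uint32_t td_desc_phys)
    {
        int tries = 100;

        queue_push(submit_q, td_desc_phys);    /* lines 662-663 */
        /* the GCR write requesting teardown goes here; line 669 names
         * complete_q as the return queue */

        while (tries--) {
            uint32_t got = queue_pop(complete_q);    /* line 679 */

            if (!got)
                got = queue_pop(chan_q);             /* line 681 */
            if (got == td_desc_phys)
                return 0;
        }
        return -1;    /* timed out */
    }

    int main(void)
    {
        printf("teardown %s\n",
               tear_down(31, 0, 93, 0x8000) ? "failed" : "ok");
        return 0;
    }
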
739 struct cppi41_dd *cdd = c->cdd; in cppi41_stop_chan() local
745 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); in cppi41_stop_chan()
746 if (!cdd->chan_busy[desc_num]) { in cppi41_stop_chan()
754 list_for_each_entry_safe(cc, _ct, &cdd->pending, node) { in cppi41_stop_chan()
767 WARN_ON(!cdd->chan_busy[desc_num]); in cppi41_stop_chan()
768 cdd->chan_busy[desc_num] = NULL; in cppi41_stop_chan()
771 pm_runtime_put(cdd->ddev.dev); in cppi41_stop_chan()
776 static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) in cppi41_add_chans() argument
780 u32 n_chans = cdd->n_chans; in cppi41_add_chans()
795 cchan->cdd = cdd; in cppi41_add_chans()
797 cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1); in cppi41_add_chans()
800 cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1); in cppi41_add_chans()
804 cchan->desc = &cdd->cd[i]; in cppi41_add_chans()
805 cchan->desc_phys = cdd->descs_phys; in cppi41_add_chans()
807 cchan->chan.device = &cdd->ddev; in cppi41_add_chans()
808 list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels); in cppi41_add_chans()
810 cdd->first_td_desc = n_chans; in cppi41_add_chans()
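
cppi41_add_chans() (lines 776-810) interleaves directions over the hardware ports: virtual channel i belongs to port i >> 1 and uses the TX global-configuration register on one parity, the RX one on the other (the odd = tx split below is an assumption, since the branch condition is not in this listing); each channel also owns descriptor i of the shared pool, and the teardown descriptors start right after, at first_td_desc = n_chans. A standalone model of the index math:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DESC_SIZE 32u    /* assumed sizeof(struct cppi41_desc) */

    struct chan_map {
        unsigned int port;    /* hardware port, i >> 1 */
        bool is_tx;           /* which GCR bank: DMA_TXGCR vs DMA_RXGCR */
        uint32_t desc_off;    /* byte offset of descriptor i in the pool */
    };

    static struct chan_map map_chan(unsigned int i)
    {
        return (struct chan_map){
            .port = i >> 1,
            .is_tx = i & 1,    /* assumed parity */
            .desc_off = i * DESC_SIZE,
        };
    }

    int main(void)
    {
        for (unsigned int i = 0; i < 6; i++) {
            struct chan_map m = map_chan(i);

            printf("chan %u -> port %u %s, desc +%u\n",
                   i, m.port, m.is_tx ? "tx" : "rx", m.desc_off);
        }
        return 0;
    }
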
815 static void purge_descs(struct device *dev, struct cppi41_dd *cdd) in purge_descs() argument
824 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); in purge_descs()
825 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); in purge_descs()
827 dma_free_coherent(dev, mem_decs, cdd->cd, in purge_descs()
828 cdd->descs_phys); in purge_descs()
832 static void disable_sched(struct cppi41_dd *cdd) in disable_sched() argument
834 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); in disable_sched()
837 static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) in deinit_cppi41() argument
839 disable_sched(cdd); in deinit_cppi41()
841 purge_descs(dev, cdd); in deinit_cppi41()
843 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); in deinit_cppi41()
844 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); in deinit_cppi41()
845 dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, in deinit_cppi41()
846 cdd->scratch_phys); in deinit_cppi41()
849 static int init_descs(struct device *dev, struct cppi41_dd *cdd) in init_descs() argument
873 cdd->cd = dma_alloc_coherent(dev, mem_decs, in init_descs()
874 &cdd->descs_phys, GFP_KERNEL); in init_descs()
875 if (!cdd->cd) in init_descs()
878 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); in init_descs()
879 cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i)); in init_descs()
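
init_descs() (lines 849-879) backs the queue manager with a single coherent allocation and then tells the hardware where it lives: QMGR_MEMBASE takes the bus address, QMGR_MEMCTRL the region's geometry; purge_descs() above undoes both before freeing. A minimal sketch of the coherent-pool half, assuming the kernel DMA API:

    #include <linux/dma-mapping.h>

    static void *alloc_desc_pool(struct device *dev, size_t pool_size,
                                 dma_addr_t *descs_phys)
    {
        /* one buffer, two views: a CPU pointer for the driver and a
         * bus address for QMGR_MEMBASE */
        void *cd = dma_alloc_coherent(dev, pool_size, descs_phys,
                                      GFP_KERNEL);

        if (!cd)
            return NULL;
        /* driver: cppi_writel(*descs_phys, qmgr_mem + QMGR_MEMBASE(i));
         *         cppi_writel(geometry, qmgr_mem + QMGR_MEMCTRL(i)); */
        return cd;
    }
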
886 static void init_sched(struct cppi41_dd *cdd) in init_sched() argument
893 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); in init_sched()
894 for (ch = 0; ch < cdd->n_chans; ch += 2) { in init_sched()
901 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word)); in init_sched()
904 reg = cdd->n_chans * 2 - 1; in init_sched()
906 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); in init_sched()
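
init_sched() (lines 886-906) programs the round-robin scheduler: each 32-bit DMA_SCHED_WORD packs four byte-wide entries, filled as tx/rx pairs for channels ch and ch+1, which is why the loop steps by two and why the last-entry count written to DMA_SCHED_CTRL is n_chans * 2 - 1 (every channel gets both a tx and an rx slot). A standalone model of the word packing; the per-entry RX flag bit is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRY_IS_RX 0x80u    /* assumed: bit 7 of each entry byte */

    /* one word = entries tx(ch), rx(ch), tx(ch+1), rx(ch+1) */
    static uint32_t sched_word(unsigned int ch)
    {
        uint32_t e0 = ch;
        uint32_t e1 = ch | ENTRY_IS_RX;
        uint32_t e2 = ch + 1;
        uint32_t e3 = (ch + 1) | ENTRY_IS_RX;

        return e0 | (e1 << 8) | (e2 << 16) | (e3 << 24);
    }

    int main(void)
    {
        unsigned int n_chans = 4;

        for (unsigned int ch = 0, word = 0; ch < n_chans; ch += 2, word++)
            printf("DMA_SCHED_WORD(%u) = 0x%08x\n", word, sched_word(ch));

        printf("last entry index = %u\n", n_chans * 2 - 1);    /* line 904 */
        return 0;
    }
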
909 static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) in init_cppi41() argument
914 cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, in init_cppi41()
915 &cdd->scratch_phys, GFP_KERNEL); in init_cppi41()
916 if (!cdd->qmgr_scratch) in init_cppi41()
919 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); in init_cppi41()
920 cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE); in init_cppi41()
921 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); in init_cppi41()
923 ret = init_descs(dev, cdd); in init_cppi41()
927 cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ); in init_cppi41()
928 init_sched(cdd); in init_cppi41()
932 deinit_cppi41(dev, cdd); in init_cppi41()
949 struct cppi41_dd *cdd; in cpp41_dma_filter_fn() local
963 cdd = cchan->cdd; in cpp41_dma_filter_fn()
965 queues = cdd->queues_tx; in cpp41_dma_filter_fn()
967 queues = cdd->queues_rx; in cpp41_dma_filter_fn()
1039 struct cppi41_dd *cdd; in cppi41_dma_probe() local
1050 cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL); in cppi41_dma_probe()
1051 if (!cdd) in cppi41_dma_probe()
1054 dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask); in cppi41_dma_probe()
1055 cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources; in cppi41_dma_probe()
1056 cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources; in cppi41_dma_probe()
1057 cdd->ddev.device_tx_status = cppi41_dma_tx_status; in cppi41_dma_probe()
1058 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; in cppi41_dma_probe()
1059 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; in cppi41_dma_probe()
1060 cdd->ddev.device_terminate_all = cppi41_stop_chan; in cppi41_dma_probe()
1061 cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in cppi41_dma_probe()
1062 cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS; in cppi41_dma_probe()
1063 cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS; in cppi41_dma_probe()
1064 cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in cppi41_dma_probe()
1065 cdd->ddev.dev = dev; in cppi41_dma_probe()
1066 INIT_LIST_HEAD(&cdd->ddev.channels); in cppi41_dma_probe()
1067 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; in cppi41_dma_probe()
1074 cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index); in cppi41_dma_probe()
1075 if (IS_ERR(cdd->ctrl_mem)) in cppi41_dma_probe()
1076 return PTR_ERR(cdd->ctrl_mem); in cppi41_dma_probe()
1078 cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1); in cppi41_dma_probe()
1079 if (IS_ERR(cdd->sched_mem)) in cppi41_dma_probe()
1080 return PTR_ERR(cdd->sched_mem); in cppi41_dma_probe()
1082 cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2); in cppi41_dma_probe()
1083 if (IS_ERR(cdd->qmgr_mem)) in cppi41_dma_probe()
1084 return PTR_ERR(cdd->qmgr_mem); in cppi41_dma_probe()
1086 spin_lock_init(&cdd->lock); in cppi41_dma_probe()
1087 INIT_LIST_HEAD(&cdd->pending); in cppi41_dma_probe()
1089 platform_set_drvdata(pdev, cdd); in cppi41_dma_probe()
1098 cdd->queues_rx = glue_info->queues_rx; in cppi41_dma_probe()
1099 cdd->queues_tx = glue_info->queues_tx; in cppi41_dma_probe()
1100 cdd->td_queue = glue_info->td_queue; in cppi41_dma_probe()
1101 cdd->qmgr_num_pend = glue_info->qmgr_num_pend; in cppi41_dma_probe()
1102 cdd->first_completion_queue = glue_info->first_completion_queue; in cppi41_dma_probe()
1106 "dma-channels", &cdd->n_chans); in cppi41_dma_probe()
1109 "#dma-channels", &cdd->n_chans); in cppi41_dma_probe()
1113 ret = init_cppi41(dev, cdd); in cppi41_dma_probe()
1117 ret = cppi41_add_chans(dev, cdd); in cppi41_dma_probe()
1128 dev_name(dev), cdd); in cppi41_dma_probe()
1131 cdd->irq = irq; in cppi41_dma_probe()
1133 ret = dma_async_device_register(&cdd->ddev); in cppi41_dma_probe()
1147 dma_async_device_unregister(&cdd->ddev); in cppi41_dma_probe()
1149 deinit_cppi41(dev, cdd); in cppi41_dma_probe()
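
Two details of cppi41_dma_probe() are worth pulling out. Lines 1106-1109 read the channel count with a device-tree compatibility fallback: the current "dma-channels" property is tried first, then the deprecated "#dma-channels" spelling. A sketch of that fallback, assuming of_property_read_u32() semantics:

    #include <linux/of.h>

    static int read_n_chans(struct device_node *np, u32 *n_chans)
    {
        int ret;

        /* current binding */
        ret = of_property_read_u32(np, "dma-channels", n_chans);
        if (!ret)
            return 0;

        /* deprecated spelling, kept for old device trees */
        return of_property_read_u32(np, "#dma-channels", n_chans);
    }

The other detail is ordering: the three register regions are mapped and the ddev callbacks filled in before init_cppi41() touches the hardware, dma_async_device_register() comes last, and the error path (lines 1147-1149) unwinds in reverse.
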
1161 struct cppi41_dd *cdd = platform_get_drvdata(pdev); in cppi41_dma_remove() local
1169 dma_async_device_unregister(&cdd->ddev); in cppi41_dma_remove()
1171 devm_free_irq(&pdev->dev, cdd->irq, cdd); in cppi41_dma_remove()
1172 deinit_cppi41(&pdev->dev, cdd); in cppi41_dma_remove()
1180 struct cppi41_dd *cdd = dev_get_drvdata(dev); in cppi41_suspend() local
1182 cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); in cppi41_suspend()
1183 disable_sched(cdd); in cppi41_suspend()
1190 struct cppi41_dd *cdd = dev_get_drvdata(dev); in cppi41_resume() local
1195 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); in cppi41_resume()
1197 list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) in cppi41_resume()
1201 init_sched(cdd); in cppi41_resume()
1203 cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); in cppi41_resume()
1204 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); in cppi41_resume()
1205 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); in cppi41_resume()
1206 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); in cppi41_resume()
1213 struct cppi41_dd *cdd = dev_get_drvdata(dev); in cppi41_runtime_suspend() local
1216 spin_lock_irqsave(&cdd->lock, flags); in cppi41_runtime_suspend()
1217 cdd->is_suspended = true; in cppi41_runtime_suspend()
1218 WARN_ON(!list_empty(&cdd->pending)); in cppi41_runtime_suspend()
1219 spin_unlock_irqrestore(&cdd->lock, flags); in cppi41_runtime_suspend()
1226 struct cppi41_dd *cdd = dev_get_drvdata(dev); in cppi41_runtime_resume() local
1229 spin_lock_irqsave(&cdd->lock, flags); in cppi41_runtime_resume()
1230 cdd->is_suspended = false; in cppi41_runtime_resume()
1231 cppi41_run_queue(cdd); in cppi41_runtime_resume()
1232 spin_unlock_irqrestore(&cdd->lock, flags); in cppi41_runtime_resume()
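
The runtime-PM callbacks (lines 1213-1232) close the loop with cppi41_dma_issue_pending() above: runtime suspend raises is_suspended under the lock (and warns if work is still pending), runtime resume clears it and drains anything that queued up while the hardware was down. A companion to the issue_pending() sketch earlier, reusing its struct ctrl and run_queue():

    static void runtime_suspend(struct ctrl *cd)
    {
        unsigned long flags;

        spin_lock_irqsave(&cd->lock, flags);
        cd->is_suspended = true;    /* new work parks on cd->pending */
        WARN_ON(!list_empty(&cd->pending));
        spin_unlock_irqrestore(&cd->lock, flags);
    }

    static void runtime_resume(struct ctrl *cd)
    {
        unsigned long flags;

        spin_lock_irqsave(&cd->lock, flags);
        cd->is_suspended = false;
        run_queue(cd);    /* drain transfers queued while suspended */
        spin_unlock_irqrestore(&cd->lock, flags);
    }
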