Lines Matching refs:fsl_qdma
313 struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; in fsl_qdma_free_chan_resources() local
354 fsl_qdma->desc_allocated--; in fsl_qdma_free_chan_resources()
484 struct fsl_qdma_engine *fsl_qdma) in fsl_qdma_alloc_queue_resources() argument
491 queue_num = fsl_qdma->n_queues; in fsl_qdma_alloc_queue_resources()
492 block_number = fsl_qdma->block_number; in fsl_qdma_alloc_queue_resources()
525 queue_temp->block_base = fsl_qdma->block_base + in fsl_qdma_alloc_queue_resources()
526 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); in fsl_qdma_alloc_queue_resources()
583 static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) in fsl_qdma_halt() argument
587 void __iomem *block, *ctrl = fsl_qdma->ctrl_base; in fsl_qdma_halt()
590 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); in fsl_qdma_halt()
592 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); in fsl_qdma_halt()
593 for (j = 0; j < fsl_qdma->block_number; j++) { in fsl_qdma_halt()
594 block = fsl_qdma->block_base + in fsl_qdma_halt()
595 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); in fsl_qdma_halt()
597 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i)); in fsl_qdma_halt()
600 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR); in fsl_qdma_halt()
608 for (j = 0; j < fsl_qdma->block_number; j++) { in fsl_qdma_halt()
609 block = fsl_qdma->block_base + in fsl_qdma_halt()
610 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); in fsl_qdma_halt()
613 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR); in fsl_qdma_halt()
619 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, in fsl_qdma_halt()
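
The halt path (lines 583-619 above) reads as a quiesce sequence: request dequeue-disable in the master register (DMR), zero every block's command-queue mode registers (BCQMR), poll the status register (DSR) until the engine reports idle, then zero each block's status-queue mode register (BSQMR) and acknowledge latched completion interrupts. A condensed sketch, assuming the driver's surrounding definitions (struct fsl_qdma_engine, the qdma_readl/qdma_writel accessors, the FSL_QDMA_* offset macros); the *_sketch function names here and below are hypothetical, FSL_QDMA_DMR_DQD and FSL_QDMA_DSR_DB are assumed bit-mask names, and the poll bound is illustrative.

/* Condensed halt/quiesce sequence stitched from the matched lines. */
static int halt_sketch(struct fsl_qdma_engine *fsl_qdma)
{
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	int i, j, count = 1500;	/* illustrative retry bound */
	u32 reg;

	/* Ask the engine to stop dequeueing new commands. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	qdma_writel(fsl_qdma, reg | FSL_QDMA_DMR_DQD, ctrl + FSL_QDMA_DMR);

	/* Disable every command queue in every block. */
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++)
			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
	}

	/* Wait for in-flight work to drain. */
	while (qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR) & FSL_QDMA_DSR_DB) {
		if (count-- < 0)
			return -EBUSY;
		udelay(100);
	}

	/* Disable the status queues and ack latched completion interrupts. */
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}
	return 0;
}
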
627 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma, in fsl_qdma_queue_transfer_complete() argument
637 struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; in fsl_qdma_queue_transfer_complete()
638 struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id]; in fsl_qdma_queue_transfer_complete()
644 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR); in fsl_qdma_queue_transfer_complete()
656 id * fsl_qdma->n_queues; in fsl_qdma_queue_transfer_complete()
680 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); in fsl_qdma_queue_transfer_complete()
687 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); in fsl_qdma_queue_transfer_complete()
695 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); in fsl_qdma_queue_transfer_complete()
701 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); in fsl_qdma_queue_transfer_complete()
723 dev_err(fsl_qdma->dma_dev.dev, in fsl_qdma_queue_transfer_complete()
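
The completion path (lines 627-723) drains the per-block status queue: it polls BSQSR until the queue-empty bit is set, maps each status entry back to its command queue (the entry's queue number offset by id * n_queues, line 656), acknowledges the entry through BSQMR, and completes or error-reports the matching descriptor. A skeleton only, with the descriptor decoding and virt-dma completion elided; FSL_QDMA_BSQSR_QE and FSL_QDMA_BSQMR_DI are assumed mask names.

/* Skeleton of the completion-reaping loop for one block. */
static void transfer_complete_sketch(struct fsl_qdma_engine *fsl_qdma,
				     void __iomem *block, int id)
{
	u32 reg;

	for (;;) {	/* the driver bounds this loop with a retry count */
		/* Stop as soon as the per-block status queue is empty. */
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE)
			return;

		/* The entry's queue number plus id * fsl_qdma->n_queues
		 * (line 656) indexes the owning command queue in the flat
		 * fsl_qdma->queue array; that queue's oldest in-flight
		 * descriptor is the one that just finished.
		 */

		/* Tell the hardware one status entry has been consumed. */
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI;
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);

		/* A clean entry completes the matching vchan cookie; a bad
		 * completion status is reported through
		 * dev_err(fsl_qdma->dma_dev.dev, ...) as at line 723.
		 */
	}
}
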
741 struct fsl_qdma_engine *fsl_qdma = dev_id; in fsl_qdma_error_handler() local
742 void __iomem *status = fsl_qdma->status_base; in fsl_qdma_error_handler()
748 intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); in fsl_qdma_error_handler()
751 decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R); in fsl_qdma_error_handler()
752 decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R); in fsl_qdma_error_handler()
753 decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R); in fsl_qdma_error_handler()
754 decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R); in fsl_qdma_error_handler()
755 dev_err(fsl_qdma->dma_dev.dev, in fsl_qdma_error_handler()
760 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); in fsl_qdma_error_handler()
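
The error interrupt handler (lines 741-760) only reports and acknowledges: it latches the error-detect register (DEDR), reads the four captured command-frame words (DECFDW0R..DECFDW3R) for diagnostics, and write-1-clears DEDR. A condensed sketch under the same assumptions as above; the exact dev_err message text is illustrative.

/* Condensed error ISR: latch, dump diagnostics, acknowledge. */
static irqreturn_t error_isr_sketch(int irq, void *dev_id)
{
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *status = fsl_qdma->status_base;
	unsigned int intr, w0, w1, w2, w3;

	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
	if (intr) {
		w0 = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
		w1 = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
		w2 = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
		w3 = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
		dev_err(fsl_qdma->dma_dev.dev,
			"DMA transaction error! (%x: %x-%x-%x-%x)\n",
			intr, w0, w1, w2, w3);
	}

	/* Write-1-to-clear the detected error bits. */
	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	return IRQ_HANDLED;
}
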
768 struct fsl_qdma_engine *fsl_qdma = dev_id; in fsl_qdma_queue_handler() local
769 void __iomem *block, *ctrl = fsl_qdma->ctrl_base; in fsl_qdma_queue_handler()
771 id = irq - fsl_qdma->irq_base; in fsl_qdma_queue_handler()
772 if (id < 0 && id > fsl_qdma->block_number) { in fsl_qdma_queue_handler()
773 dev_err(fsl_qdma->dma_dev.dev, in fsl_qdma_queue_handler()
775 irq, fsl_qdma->irq_base); in fsl_qdma_queue_handler()
778 block = fsl_qdma->block_base + in fsl_qdma_queue_handler()
779 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id); in fsl_qdma_queue_handler()
781 intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)); in fsl_qdma_queue_handler()
784 intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id); in fsl_qdma_queue_handler()
787 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); in fsl_qdma_queue_handler()
789 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); in fsl_qdma_queue_handler()
790 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0)); in fsl_qdma_queue_handler()
791 dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n"); in fsl_qdma_queue_handler()
795 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, in fsl_qdma_queue_handler()
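
The per-block queue interrupt handler (lines 768-795) derives the block id from irq - irq_base, reads the block's interrupt-detect register, reaps completions via fsl_qdma_queue_transfer_complete(), and on any leftover status shuts the engine down (sets the DMR disable bit and masks BCQIER) before acknowledging. Note that the bounds check at line 772 joins its two conditions with &&, which can never both hold; || appears intended. A sketch with that check written as a range test; FSL_QDMA_CQIDR_SQT and FSL_QDMA_DMR_DQD are assumed mask names and the error strings are illustrative.

/* Condensed per-block queue ISR. */
static irqreturn_t queue_isr_sketch(int irq, void *dev_id)
{
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	unsigned int intr, reg;
	int id = irq - fsl_qdma->irq_base;

	/* Range check; line 772 uses "&&" here, where "||" is needed. */
	if (id < 0 || id >= fsl_qdma->block_number) {
		dev_err(fsl_qdma->dma_dev.dev,
			"unexpected irq %d (irq_base %d)\n",
			irq, fsl_qdma->irq_base);
		return IRQ_NONE;
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	/* Reap completions when the status-queue threshold bit is set. */
	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
	if (intr & FSL_QDMA_CQIDR_SQT)
		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

	/* Anything left over is treated as fatal: stop dequeueing and mask
	 * this block's command-queue interrupts.
	 */
	if (intr) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
		qdma_writel(fsl_qdma, reg | FSL_QDMA_DMR_DQD,
			    ctrl + FSL_QDMA_DMR);
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
	}

	/* Acknowledge whatever was detected. */
	qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
		    block + FSL_QDMA_BCQIDR(0));
	return IRQ_HANDLED;
}
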
803 struct fsl_qdma_engine *fsl_qdma) in fsl_qdma_irq_init() argument
810 fsl_qdma->error_irq = in fsl_qdma_irq_init()
812 if (fsl_qdma->error_irq < 0) in fsl_qdma_irq_init()
813 return fsl_qdma->error_irq; in fsl_qdma_irq_init()
815 ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq, in fsl_qdma_irq_init()
817 "qDMA error", fsl_qdma); in fsl_qdma_irq_init()
823 for (i = 0; i < fsl_qdma->block_number; i++) { in fsl_qdma_irq_init()
825 fsl_qdma->queue_irq[i] = in fsl_qdma_irq_init()
828 if (fsl_qdma->queue_irq[i] < 0) in fsl_qdma_irq_init()
829 return fsl_qdma->queue_irq[i]; in fsl_qdma_irq_init()
832 fsl_qdma->queue_irq[i], in fsl_qdma_irq_init()
836 fsl_qdma); in fsl_qdma_irq_init()
844 ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i], in fsl_qdma_irq_init()
850 fsl_qdma->queue_irq[i]); in fsl_qdma_irq_init()
859 struct fsl_qdma_engine *fsl_qdma) in fsl_qdma_irq_exit() argument
863 devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma); in fsl_qdma_irq_exit()
864 for (i = 0; i < fsl_qdma->block_number; i++) in fsl_qdma_irq_exit()
865 devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma); in fsl_qdma_irq_exit()
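
IRQ setup and teardown (lines 803-865): one error interrupt is routed to fsl_qdma_error_handler, then one queue interrupt per block is routed to fsl_qdma_queue_handler, with an affinity hint spreading the queue IRQs across online CPUs; fsl_qdma_irq_exit releases them again. A sketch of that wiring, assuming the interrupt names follow the "qdma-error"/"qdma-queue%d" pattern suggested by the probe's "qdma-queue0" lookup at line 1200, with error messages trimmed.

/* Request the error IRQ plus one queue IRQ per block. */
static int irq_init_sketch(struct platform_device *pdev,
			   struct fsl_qdma_engine *fsl_qdma)
{
	char irq_name[32];
	int i, cpu, ret;

	fsl_qdma->error_irq = platform_get_irq_byname(pdev, "qdma-error");
	if (fsl_qdma->error_irq < 0)
		return fsl_qdma->error_irq;

	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
			       fsl_qdma_error_handler, 0,
			       "qDMA error", fsl_qdma);
	if (ret)
		return ret;

	for (i = 0; i < fsl_qdma->block_number; i++) {
		snprintf(irq_name, sizeof(irq_name), "qdma-queue%d", i);
		fsl_qdma->queue_irq[i] =
			platform_get_irq_byname(pdev, irq_name);
		if (fsl_qdma->queue_irq[i] < 0)
			return fsl_qdma->queue_irq[i];

		ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq[i],
				       fsl_qdma_queue_handler, 0,
				       "qDMA queue", fsl_qdma);
		if (ret)
			return ret;

		/* Spread the per-block queue IRQs across online CPUs. */
		cpu = i % num_online_cpus();
		ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
					    get_cpu_mask(cpu));
		if (ret)
			return ret;
	}
	return 0;
}
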
868 static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) in fsl_qdma_reg_init() argument
873 void __iomem *status = fsl_qdma->status_base; in fsl_qdma_reg_init()
874 void __iomem *block, *ctrl = fsl_qdma->ctrl_base; in fsl_qdma_reg_init()
875 struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; in fsl_qdma_reg_init()
878 ret = fsl_qdma_halt(fsl_qdma); in fsl_qdma_reg_init()
880 dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!"); in fsl_qdma_reg_init()
884 for (i = 0; i < fsl_qdma->block_number; i++) { in fsl_qdma_reg_init()
890 block = fsl_qdma->block_base + in fsl_qdma_reg_init()
891 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i); in fsl_qdma_reg_init()
892 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, in fsl_qdma_reg_init()
896 for (j = 0; j < fsl_qdma->block_number; j++) { in fsl_qdma_reg_init()
897 block = fsl_qdma->block_base + in fsl_qdma_reg_init()
898 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); in fsl_qdma_reg_init()
899 for (i = 0; i < fsl_qdma->n_queues; i++) { in fsl_qdma_reg_init()
900 temp = fsl_queue + i + (j * fsl_qdma->n_queues); in fsl_qdma_reg_init()
909 qdma_writel(fsl_qdma, temp->bus_addr, in fsl_qdma_reg_init()
911 qdma_writel(fsl_qdma, temp->bus_addr, in fsl_qdma_reg_init()
918 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i)); in fsl_qdma_reg_init()
927 qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM, in fsl_qdma_reg_init()
937 qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, in fsl_qdma_reg_init()
939 qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, in fsl_qdma_reg_init()
942 qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE, in fsl_qdma_reg_init()
944 qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | in fsl_qdma_reg_init()
947 qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | in fsl_qdma_reg_init()
954 (fsl_qdma->status[j]->n_cq) - 6); in fsl_qdma_reg_init()
956 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); in fsl_qdma_reg_init()
957 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); in fsl_qdma_reg_init()
961 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); in fsl_qdma_reg_init()
962 qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER); in fsl_qdma_reg_init()
964 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); in fsl_qdma_reg_init()
966 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); in fsl_qdma_reg_init()
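
fsl_qdma_reg_init (lines 868-966) is the bring-up counterpart of the halt path: halt first, clear stale interrupt-detect bits, point every command queue's enqueue/dequeue registers at its descriptor ring and enable it through BCQMR, give each block's status queue the same treatment through BSQMR (lines 937/939 write the status ring's bus_addr into both of its pointer registers), unmask the interrupt sources, clear DEDR/DEIER, and finally drop the DMR disable bit. A sketch of the per-block programming loop; the pointer-register and enable/size macro names (FSL_QDMA_BCQDPA_SADDR, FSL_QDMA_BCQEPA_SADDR, FSL_QDMA_BCQMR_EN, FSL_QDMA_BCQMR_CQ_SIZE, FSL_QDMA_BSQMR_EN, FSL_QDMA_BSQMR_CQ_SIZE) and the log2-based size encoding are assumptions consistent with the "- 6" at line 954.

/* Per-block queue programming inside reg_init. */
static void reg_init_block_sketch(struct fsl_qdma_engine *fsl_qdma)
{
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue, *temp;
	void __iomem *block;
	u32 reg;
	int i, j;

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);

			/* Point both pointer registers at the start of this
			 * queue's descriptor ring, then enable the queue with
			 * its ring size encoded in the mode register.
			 */
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQEPA_SADDR(i));
			reg = FSL_QDMA_BCQMR_EN |
			      FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
		}

		/* The block's status queue gets the same base address in
		 * both of its pointer registers (lines 937/939), then is
		 * sized and enabled through BSQMR.
		 */
		reg = FSL_QDMA_BSQMR_EN |
		      FSL_QDMA_BSQMR_CQ_SIZE(
				ilog2(fsl_qdma->status[j]->n_cq) - 6);
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
	}
}
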
1070 struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; in fsl_qdma_alloc_chan_resources() local
1074 return fsl_qdma->desc_allocated; in fsl_qdma_alloc_chan_resources()
1107 fsl_qdma->desc_allocated++; in fsl_qdma_alloc_chan_resources()
1108 return fsl_qdma->desc_allocated; in fsl_qdma_alloc_chan_resources()
1123 struct fsl_qdma_engine *fsl_qdma; in fsl_qdma_probe() local
1146 len = sizeof(*fsl_qdma); in fsl_qdma_probe()
1147 fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); in fsl_qdma_probe()
1148 if (!fsl_qdma) in fsl_qdma_probe()
1152 fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); in fsl_qdma_probe()
1153 if (!fsl_qdma->chans) in fsl_qdma_probe()
1157 fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); in fsl_qdma_probe()
1158 if (!fsl_qdma->status) in fsl_qdma_probe()
1162 fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); in fsl_qdma_probe()
1163 if (!fsl_qdma->queue_irq) in fsl_qdma_probe()
1172 fsl_qdma->desc_allocated = 0; in fsl_qdma_probe()
1173 fsl_qdma->n_chans = chans; in fsl_qdma_probe()
1174 fsl_qdma->n_queues = queues; in fsl_qdma_probe()
1175 fsl_qdma->block_number = blk_num; in fsl_qdma_probe()
1176 fsl_qdma->block_offset = blk_off; in fsl_qdma_probe()
1178 mutex_init(&fsl_qdma->fsl_qdma_mutex); in fsl_qdma_probe()
1180 for (i = 0; i < fsl_qdma->block_number; i++) { in fsl_qdma_probe()
1181 fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev); in fsl_qdma_probe()
1182 if (!fsl_qdma->status[i]) in fsl_qdma_probe()
1185 fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0); in fsl_qdma_probe()
1186 if (IS_ERR(fsl_qdma->ctrl_base)) in fsl_qdma_probe()
1187 return PTR_ERR(fsl_qdma->ctrl_base); in fsl_qdma_probe()
1189 fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1); in fsl_qdma_probe()
1190 if (IS_ERR(fsl_qdma->status_base)) in fsl_qdma_probe()
1191 return PTR_ERR(fsl_qdma->status_base); in fsl_qdma_probe()
1193 fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2); in fsl_qdma_probe()
1194 if (IS_ERR(fsl_qdma->block_base)) in fsl_qdma_probe()
1195 return PTR_ERR(fsl_qdma->block_base); in fsl_qdma_probe()
1196 fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma); in fsl_qdma_probe()
1197 if (!fsl_qdma->queue) in fsl_qdma_probe()
1200 fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); in fsl_qdma_probe()
1201 if (fsl_qdma->irq_base < 0) in fsl_qdma_probe()
1202 return fsl_qdma->irq_base; in fsl_qdma_probe()
1204 fsl_qdma->feature = of_property_read_bool(np, "big-endian"); in fsl_qdma_probe()
1205 INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); in fsl_qdma_probe()
1207 for (i = 0; i < fsl_qdma->n_chans; i++) { in fsl_qdma_probe()
1208 struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; in fsl_qdma_probe()
1210 fsl_chan->qdma = fsl_qdma; in fsl_qdma_probe()
1211 fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues * in fsl_qdma_probe()
1212 fsl_qdma->block_number); in fsl_qdma_probe()
1214 vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev); in fsl_qdma_probe()
1217 dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask); in fsl_qdma_probe()
1219 fsl_qdma->dma_dev.dev = &pdev->dev; in fsl_qdma_probe()
1220 fsl_qdma->dma_dev.device_free_chan_resources = in fsl_qdma_probe()
1222 fsl_qdma->dma_dev.device_alloc_chan_resources = in fsl_qdma_probe()
1224 fsl_qdma->dma_dev.device_tx_status = dma_cookie_status; in fsl_qdma_probe()
1225 fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy; in fsl_qdma_probe()
1226 fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending; in fsl_qdma_probe()
1227 fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize; in fsl_qdma_probe()
1228 fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all; in fsl_qdma_probe()
1236 platform_set_drvdata(pdev, fsl_qdma); in fsl_qdma_probe()
1238 ret = fsl_qdma_reg_init(fsl_qdma); in fsl_qdma_probe()
1244 ret = fsl_qdma_irq_init(pdev, fsl_qdma); in fsl_qdma_probe()
1248 ret = dma_async_device_register(&fsl_qdma->dma_dev); in fsl_qdma_probe()
1271 struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); in fsl_qdma_remove() local
1273 fsl_qdma_irq_exit(pdev, fsl_qdma); in fsl_qdma_remove()
1274 fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev); in fsl_qdma_remove()
1276 dma_async_device_unregister(&fsl_qdma->dma_dev); in fsl_qdma_remove()
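
Probe and remove (lines 1123-1276) follow the usual platform-driver shape: parse the DT properties into n_chans/n_queues/block_number/block_offset, allocate the engine plus its chans/status/queue_irq arrays, ioremap the ctrl, status and block register regions, build the queue resources, register every channel with the virt-dma core, advertise DMA_MEMCPY with the dmaengine callbacks, then run reg_init, irq_init and dma_async_device_register; remove tears the same things down. For context, a minimal dmaengine client sketch that exercises the MEMCPY capability advertised above; this is generic dmaengine API usage, not part of fsl-qdma, qdma_memcpy_demo is a hypothetical helper name, and dst/src are assumed to be already DMA-mapped bus addresses.

#include <linux/dmaengine.h>

/* Request any memcpy-capable channel (possibly fsl-qdma), submit one
 * copy, and busy-wait for completion.
 */
static int qdma_memcpy_demo(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll for completion; real clients normally use tx->callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}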