// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2022 Digiteq Automotive
 *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
 *
 * This module handles the DMA transfers. The standard dmaengine API, as
 * provided by the XDMA module, is used.
 */

#include <linux/pci.h>
#include <linux/dma-direction.h>
#include "mgb4_core.h"
#include "mgb4_dma.h"

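/*
 * DMA completion callback. Signals the completion the submitting thread is
 * waiting on in mgb4_dma_transfer().
 */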
static void chan_irq(void *param)
{
	struct mgb4_dma_channel *chan = param;

	complete(&chan->req_compl);
}

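/*
 * Run a single DMA transfer on the given channel and block until it either
 * completes or times out. Returns 0 on success, a negative error code
 * otherwise.
 */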
int mgb4_dma_transfer(struct mgb4_dev *mgbdev, u32 channel, bool write,
		      u64 paddr, struct sg_table *sgt)
{
	struct dma_slave_config cfg;
	struct mgb4_dma_channel *chan;
	struct dma_async_tx_descriptor *tx;
	struct pci_dev *pdev = mgbdev->pdev;
	int ret;

	memset(&cfg, 0, sizeof(cfg));

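	/*
	 * Host-to-card (write) transfers go through the H2C channels,
	 * card-to-host (read) transfers through the C2H channels.
	 */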
	if (write) {
		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = paddr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		chan = &mgbdev->h2c_chan[channel];
	} else {
		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = paddr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		chan = &mgbdev->c2h_chan[channel];
	}

	ret = dmaengine_slave_config(chan->chan, &cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to config dma: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(chan->chan, sgt->sgl, sgt->nents,
				     cfg.direction, 0);
	if (!tx) {
		dev_err(&pdev->dev, "failed to prep slave sg\n");
		return -EIO;
	}

	tx->callback = chan_irq;
	tx->callback_param = chan;

	ret = dma_submit_error(dmaengine_submit(tx));
	if (ret) {
		dev_err(&pdev->dev, "failed to submit sg\n");
		return -EIO;
	}

	dma_async_issue_pending(chan->chan);

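	/*
	 * Wait for chan_irq(); if the transfer does not finish within 10
	 * seconds, terminate it and report an error.
	 */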
	if (!wait_for_completion_timeout(&chan->req_compl,
					 msecs_to_jiffies(10000))) {
		dev_err(&pdev->dev, "dma timeout\n");
		dmaengine_terminate_sync(chan->chan);
		return -EIO;
	}

	return 0;
}

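/*
 * Request the C2H (video in) and H2C (video out) dmaengine channels and
 * initialize their request completions. A channel pointer that could not be
 * acquired is cleared so that mgb4_dma_channel_free() remains safe to call.
 */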
int mgb4_dma_channel_init(struct mgb4_dev *mgbdev)
{
	int i, ret;
	char name[16];
	struct pci_dev *pdev = mgbdev->pdev;

	for (i = 0; i < MGB4_VIN_DEVICES; i++) {
		sprintf(name, "c2h%d", i);
		mgbdev->c2h_chan[i].chan = dma_request_chan(&pdev->dev, name);
		if (IS_ERR(mgbdev->c2h_chan[i].chan)) {
			dev_err(&pdev->dev, "failed to initialize %s", name);
			ret = PTR_ERR(mgbdev->c2h_chan[i].chan);
			mgbdev->c2h_chan[i].chan = NULL;
			return ret;
		}
		init_completion(&mgbdev->c2h_chan[i].req_compl);
	}
	for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
		sprintf(name, "h2c%d", i);
		mgbdev->h2c_chan[i].chan = dma_request_chan(&pdev->dev, name);
		if (IS_ERR(mgbdev->h2c_chan[i].chan)) {
			dev_err(&pdev->dev, "failed to initialize %s", name);
			ret = PTR_ERR(mgbdev->h2c_chan[i].chan);
			mgbdev->h2c_chan[i].chan = NULL;
			return ret;
		}
		init_completion(&mgbdev->h2c_chan[i].req_compl);
	}

	return 0;
}

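/* Release all successfully acquired dmaengine channels. */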
void mgb4_dma_channel_free(struct mgb4_dev *mgbdev)
{
	int i;

	for (i = 0; i < MGB4_VIN_DEVICES; i++) {
		if (mgbdev->c2h_chan[i].chan)
			dma_release_channel(mgbdev->c2h_chan[i].chan);
	}
	for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
		if (mgbdev->h2c_chan[i].chan)
			dma_release_channel(mgbdev->h2c_chan[i].chan);
	}
}