xref: /linux/drivers/dma/loongson/loongson1-apb-dma.c (revision d662a710c668a86a39ebaad334d9960a0cc776c2)
1*ffee2dc0SBinbin Zhou // SPDX-License-Identifier: GPL-2.0-or-later
2*ffee2dc0SBinbin Zhou /*
3*ffee2dc0SBinbin Zhou  * Driver for Loongson-1 APB DMA Controller
4*ffee2dc0SBinbin Zhou  *
5*ffee2dc0SBinbin Zhou  * Copyright (C) 2015-2024 Keguang Zhang <keguang.zhang@gmail.com>
6*ffee2dc0SBinbin Zhou  */
7*ffee2dc0SBinbin Zhou 
8*ffee2dc0SBinbin Zhou #include <linux/dmapool.h>
9*ffee2dc0SBinbin Zhou #include <linux/dma-mapping.h>
10*ffee2dc0SBinbin Zhou #include <linux/init.h>
11*ffee2dc0SBinbin Zhou #include <linux/interrupt.h>
12*ffee2dc0SBinbin Zhou #include <linux/iopoll.h>
13*ffee2dc0SBinbin Zhou #include <linux/module.h>
14*ffee2dc0SBinbin Zhou #include <linux/of.h>
15*ffee2dc0SBinbin Zhou #include <linux/of_dma.h>
16*ffee2dc0SBinbin Zhou #include <linux/platform_device.h>
17*ffee2dc0SBinbin Zhou #include <linux/slab.h>
18*ffee2dc0SBinbin Zhou 
19*ffee2dc0SBinbin Zhou #include "../dmaengine.h"
20*ffee2dc0SBinbin Zhou #include "../virt-dma.h"
21*ffee2dc0SBinbin Zhou 
22*ffee2dc0SBinbin Zhou /* Loongson-1 DMA Control Register */
23*ffee2dc0SBinbin Zhou #define LS1X_DMA_CTRL		0x0
24*ffee2dc0SBinbin Zhou 
25*ffee2dc0SBinbin Zhou /* DMA Control Register Bits */
26*ffee2dc0SBinbin Zhou #define LS1X_DMA_STOP		BIT(4)
27*ffee2dc0SBinbin Zhou #define LS1X_DMA_START		BIT(3)
28*ffee2dc0SBinbin Zhou #define LS1X_DMA_ASK_VALID	BIT(2)
29*ffee2dc0SBinbin Zhou 
30*ffee2dc0SBinbin Zhou /* DMA Next Field Bits */
31*ffee2dc0SBinbin Zhou #define LS1X_DMA_NEXT_VALID	BIT(0)
32*ffee2dc0SBinbin Zhou 
33*ffee2dc0SBinbin Zhou /* DMA Command Field Bits */
34*ffee2dc0SBinbin Zhou #define LS1X_DMA_RAM2DEV	BIT(12)
35*ffee2dc0SBinbin Zhou #define LS1X_DMA_INT		BIT(1)
36*ffee2dc0SBinbin Zhou #define LS1X_DMA_INT_MASK	BIT(0)
37*ffee2dc0SBinbin Zhou 
38*ffee2dc0SBinbin Zhou #define LS1X_DMA_LLI_ALIGNMENT	64
39*ffee2dc0SBinbin Zhou #define LS1X_DMA_LLI_ADDR_MASK	GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT))
40*ffee2dc0SBinbin Zhou #define LS1X_DMA_MAX_CHANNELS	3
41*ffee2dc0SBinbin Zhou 
/*
 * Word offsets of the fields within one hardware DMA descriptor, i.e.
 * indices into struct ls1x_dma_lli::hw[].  LS1X_DMADESC_SIZE is the
 * total number of 32-bit words per descriptor, not a field.
 */
enum ls1x_dmadesc_offsets {
	LS1X_DMADESC_NEXT = 0,	/* next lli address | LS1X_DMA_NEXT_VALID */
	LS1X_DMADESC_SADDR,	/* memory buffer address */
	LS1X_DMADESC_DADDR,	/* device (FIFO) address */
	LS1X_DMADESC_LENGTH,	/* transfer length in bus-width units */
	LS1X_DMADESC_STRIDE,	/* set to 0 by this driver */
	LS1X_DMADESC_CYCLES,	/* set to 1 by this driver */
	LS1X_DMADESC_CMD,	/* command bits (LS1X_DMA_RAM2DEV etc.) */
	LS1X_DMADESC_SIZE
};
52*ffee2dc0SBinbin Zhou 
/*
 * One hardware linked-list item (lli).
 *
 * @hw:   the descriptor words fetched by the DMA engine (see
 *        enum ls1x_dmadesc_offsets)
 * @phys: DMA address of this lli, as handed to the controller
 * @node: link into ls1x_dma_desc::lli_list
 *
 * 64-byte alignment matches LS1X_DMA_LLI_ALIGNMENT: the control register
 * only carries address bits covered by LS1X_DMA_LLI_ADDR_MASK.
 */
struct ls1x_dma_lli {
	unsigned int hw[LS1X_DMADESC_SIZE];
	dma_addr_t phys;
	struct list_head node;
} __aligned(LS1X_DMA_LLI_ALIGNMENT);
58*ffee2dc0SBinbin Zhou 
/*
 * Software descriptor: a virt-dma descriptor plus the chain of hardware
 * llis (ls1x_dma_lli::node) that implement it.
 */
struct ls1x_dma_desc {
	struct virt_dma_desc vd;
	struct list_head lli_list;
};
63*ffee2dc0SBinbin Zhou 
/*
 * Per-channel state.
 *
 * @vc:             virt-dma channel (lock, descriptor lists, tasklet)
 * @lli_pool:       dma_pool the hardware llis are carved from
 * @src_addr:       device address for DMA_DEV_TO_MEM (from slave_config)
 * @dst_addr:       device address for DMA_MEM_TO_DEV (from slave_config)
 * @src_addr_width: bus width for DMA_DEV_TO_MEM transfers
 * @dst_addr_width: bus width for DMA_MEM_TO_DEV transfers
 * @bus_width:      width of the transfer currently prepared; used to
 *                  convert lli lengths back to bytes in tx_status
 * @reg_base:       DMA control register block (shared by all channels)
 * @irq:            channel interrupt line
 * @is_cyclic:      true once a cyclic transfer has been prepared
 * @curr_lli:       coherent scratch lli the controller writes its current
 *                  position into on "ask" (query/pause/resume/tx_status)
 */
struct ls1x_dma_chan {
	struct virt_dma_chan vc;
	struct dma_pool *lli_pool;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	unsigned int bus_width;
	void __iomem *reg_base;
	int irq;
	bool is_cyclic;
	struct ls1x_dma_lli *curr_lli;
};
77*ffee2dc0SBinbin Zhou 
/*
 * Controller instance: the dmaengine device plus a flexible array of
 * channels sized at probe time from the IRQ count.
 */
struct ls1x_dma {
	struct dma_device ddev;
	unsigned int nr_chans;
	struct ls1x_dma_chan chan[];
};
83*ffee2dc0SBinbin Zhou 
84*ffee2dc0SBinbin Zhou static irqreturn_t ls1x_dma_irq_handler(int irq, void *data);
85*ffee2dc0SBinbin Zhou 
86*ffee2dc0SBinbin Zhou #define to_ls1x_dma_chan(dchan)		\
87*ffee2dc0SBinbin Zhou 	container_of(dchan, struct ls1x_dma_chan, vc.chan)
88*ffee2dc0SBinbin Zhou 
89*ffee2dc0SBinbin Zhou #define to_ls1x_dma_desc(d)		\
90*ffee2dc0SBinbin Zhou 	container_of(d, struct ls1x_dma_desc, vd)
91*ffee2dc0SBinbin Zhou 
92*ffee2dc0SBinbin Zhou static inline struct device *chan2dev(struct dma_chan *chan)
93*ffee2dc0SBinbin Zhou {
94*ffee2dc0SBinbin Zhou 	return &chan->dev->device;
95*ffee2dc0SBinbin Zhou }
96*ffee2dc0SBinbin Zhou 
97*ffee2dc0SBinbin Zhou static inline int ls1x_dma_query(struct ls1x_dma_chan *chan,
98*ffee2dc0SBinbin Zhou 				 dma_addr_t *lli_phys)
99*ffee2dc0SBinbin Zhou {
100*ffee2dc0SBinbin Zhou 	struct dma_chan *dchan = &chan->vc.chan;
101*ffee2dc0SBinbin Zhou 	int val, ret;
102*ffee2dc0SBinbin Zhou 
103*ffee2dc0SBinbin Zhou 	val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
104*ffee2dc0SBinbin Zhou 	val |= LS1X_DMA_ASK_VALID;
105*ffee2dc0SBinbin Zhou 	val |= dchan->chan_id;
106*ffee2dc0SBinbin Zhou 	writel(val, chan->reg_base + LS1X_DMA_CTRL);
107*ffee2dc0SBinbin Zhou 	ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val,
108*ffee2dc0SBinbin Zhou 					!(val & LS1X_DMA_ASK_VALID), 0, 3000);
109*ffee2dc0SBinbin Zhou 	if (ret)
110*ffee2dc0SBinbin Zhou 		dev_err(chan2dev(dchan), "failed to query DMA\n");
111*ffee2dc0SBinbin Zhou 
112*ffee2dc0SBinbin Zhou 	return ret;
113*ffee2dc0SBinbin Zhou }
114*ffee2dc0SBinbin Zhou 
115*ffee2dc0SBinbin Zhou static inline int ls1x_dma_start(struct ls1x_dma_chan *chan,
116*ffee2dc0SBinbin Zhou 				 dma_addr_t *lli_phys)
117*ffee2dc0SBinbin Zhou {
118*ffee2dc0SBinbin Zhou 	struct dma_chan *dchan = &chan->vc.chan;
119*ffee2dc0SBinbin Zhou 	struct device *dev = chan2dev(dchan);
120*ffee2dc0SBinbin Zhou 	int val, ret;
121*ffee2dc0SBinbin Zhou 
122*ffee2dc0SBinbin Zhou 	val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
123*ffee2dc0SBinbin Zhou 	val |= LS1X_DMA_START;
124*ffee2dc0SBinbin Zhou 	val |= dchan->chan_id;
125*ffee2dc0SBinbin Zhou 	writel(val, chan->reg_base + LS1X_DMA_CTRL);
126*ffee2dc0SBinbin Zhou 	ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val,
127*ffee2dc0SBinbin Zhou 				 !(val & LS1X_DMA_START), 0, 1000);
128*ffee2dc0SBinbin Zhou 	if (!ret)
129*ffee2dc0SBinbin Zhou 		dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys);
130*ffee2dc0SBinbin Zhou 	else
131*ffee2dc0SBinbin Zhou 		dev_err(dev, "failed to start DMA\n");
132*ffee2dc0SBinbin Zhou 
133*ffee2dc0SBinbin Zhou 	return ret;
134*ffee2dc0SBinbin Zhou }
135*ffee2dc0SBinbin Zhou 
136*ffee2dc0SBinbin Zhou static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan)
137*ffee2dc0SBinbin Zhou {
138*ffee2dc0SBinbin Zhou 	int val = readl(chan->reg_base + LS1X_DMA_CTRL);
139*ffee2dc0SBinbin Zhou 
140*ffee2dc0SBinbin Zhou 	writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL);
141*ffee2dc0SBinbin Zhou }
142*ffee2dc0SBinbin Zhou 
/*
 * Undo ls1x_dma_alloc_chan_resources(): release the coherent scratch lli,
 * destroy the lli pool (must come after all llis are freed back to it),
 * release the channel IRQ, and drop the virt-dma bookkeeping.
 */
static void ls1x_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct device *dev = chan2dev(dchan);

	dma_free_coherent(dev, sizeof(struct ls1x_dma_lli),
			  chan->curr_lli, chan->curr_lli->phys);
	dma_pool_destroy(chan->lli_pool);
	chan->lli_pool = NULL;	/* channel may be re-allocated later */
	devm_free_irq(dev, chan->irq, chan);
	vchan_free_chan_resources(&chan->vc);
}
155*ffee2dc0SBinbin Zhou 
/*
 * Per-channel setup: request the (shared) channel IRQ, create the
 * dma_pool that hardware llis are allocated from, and allocate one
 * coherent lli used as the scratch buffer for position queries.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released.
 */
static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct device *dev = chan2dev(dchan);
	dma_addr_t phys;
	int ret;

	ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler,
			       IRQF_SHARED, dma_chan_name(dchan), chan);
	if (ret) {
		dev_err(dev, "failed to request IRQ %d\n", chan->irq);
		return ret;
	}

	/* pool alignment must match the controller's lli address mask */
	chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev,
					 sizeof(struct ls1x_dma_lli),
					 __alignof__(struct ls1x_dma_lli), 0);
	if (!chan->lli_pool)
		return -ENOMEM;

	/* allocate memory for querying the current lli */
	/* NOTE(review): dma_set_coherent_mask() result is ignored — confirm
	 * a 32-bit coherent mask can never fail on this platform.
	 */
	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli),
					    &phys, GFP_KERNEL);
	if (!chan->curr_lli) {
		dma_pool_destroy(chan->lli_pool);
		return -ENOMEM;
	}
	chan->curr_lli->phys = phys;

	return 0;
}
188*ffee2dc0SBinbin Zhou 
189*ffee2dc0SBinbin Zhou static void ls1x_dma_free_desc(struct virt_dma_desc *vd)
190*ffee2dc0SBinbin Zhou {
191*ffee2dc0SBinbin Zhou 	struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
192*ffee2dc0SBinbin Zhou 	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan);
193*ffee2dc0SBinbin Zhou 	struct ls1x_dma_lli *lli, *_lli;
194*ffee2dc0SBinbin Zhou 
195*ffee2dc0SBinbin Zhou 	list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
196*ffee2dc0SBinbin Zhou 		list_del(&lli->node);
197*ffee2dc0SBinbin Zhou 		dma_pool_free(chan->lli_pool, lli, lli->phys);
198*ffee2dc0SBinbin Zhou 	}
199*ffee2dc0SBinbin Zhou 
200*ffee2dc0SBinbin Zhou 	kfree(desc);
201*ffee2dc0SBinbin Zhou }
202*ffee2dc0SBinbin Zhou 
203*ffee2dc0SBinbin Zhou static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void)
204*ffee2dc0SBinbin Zhou {
205*ffee2dc0SBinbin Zhou 	struct ls1x_dma_desc *desc;
206*ffee2dc0SBinbin Zhou 
207*ffee2dc0SBinbin Zhou 	desc = kzalloc_obj(*desc, GFP_NOWAIT);
208*ffee2dc0SBinbin Zhou 	if (!desc)
209*ffee2dc0SBinbin Zhou 		return NULL;
210*ffee2dc0SBinbin Zhou 
211*ffee2dc0SBinbin Zhou 	INIT_LIST_HEAD(&desc->lli_list);
212*ffee2dc0SBinbin Zhou 
213*ffee2dc0SBinbin Zhou 	return desc;
214*ffee2dc0SBinbin Zhou }
215*ffee2dc0SBinbin Zhou 
/*
 * Build the hardware lli chain for a (possibly cyclic) scatterlist and
 * attach it to @desc->lli_list.
 *
 * Each sg entry becomes one lli; llis are linked through their NEXT
 * field, and for cyclic transfers the last lli points back at the first.
 * Returns 0 on success or a negative errno.  Any llis already allocated
 * on failure remain on @desc->lli_list and are released when the caller
 * frees the descriptor.
 */
static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc,
			     struct scatterlist *sgl, unsigned int sg_len,
			     enum dma_transfer_direction dir, bool is_cyclic)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL;
	struct device *dev = chan2dev(dchan);
	struct list_head *pos = NULL;
	struct scatterlist *sg;
	/* dev_addr is 32-bit; assumes device FIFOs live below 4 GiB —
	 * consistent with the DMA_BIT_MASK(32) coherent mask set at
	 * channel allocation.
	 */
	unsigned int dev_addr, cmd, i;

	/* pick the fixed device endpoint and bus width per direction */
	switch (dir) {
	case DMA_MEM_TO_DEV:
		dev_addr = chan->dst_addr;
		chan->bus_width = chan->dst_addr_width;
		cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
		break;
	case DMA_DEV_TO_MEM:
		dev_addr = chan->src_addr;
		chan->bus_width = chan->src_addr_width;
		cmd = LS1X_DMA_INT;
		break;
	default:
		dev_err(dev, "unsupported DMA direction: %s\n",
			dmaengine_get_direction_text(dir));
		return -EINVAL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf_addr = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);
		dma_addr_t phys;

		if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) {
			dev_err(dev, "buffer is not aligned\n");
			return -EINVAL;
		}

		/* allocate HW descriptors */
		lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys);
		if (!lli) {
			dev_err(dev, "failed to alloc lli %u\n", i);
			return -ENOMEM;
		}

		/* setup HW descriptors */
		lli->phys = phys;
		lli->hw[LS1X_DMADESC_SADDR] = buf_addr;
		lli->hw[LS1X_DMADESC_DADDR] = dev_addr;
		/* length is programmed in bus-width units, not bytes */
		lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width;
		lli->hw[LS1X_DMADESC_STRIDE] = 0;
		lli->hw[LS1X_DMADESC_CYCLES] = 1;
		lli->hw[LS1X_DMADESC_CMD] = cmd;

		/* chain onto the previous lli */
		if (prev)
			prev->hw[LS1X_DMADESC_NEXT] =
			    lli->phys | LS1X_DMA_NEXT_VALID;
		prev = lli;

		if (!first)
			first = lli;

		list_add_tail(&lli->node, &desc->lli_list);
	}

	/* close the loop for cyclic transfers: last lli -> first lli */
	if (is_cyclic) {
		lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID;
		chan->is_cyclic = is_cyclic;
	}

	list_for_each(pos, &desc->lli_list) {
		lli = list_entry(pos, struct ls1x_dma_lli, node);
		print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4,
				     lli, sizeof(*lli), false);
	}

	return 0;
}
294*ffee2dc0SBinbin Zhou 
295*ffee2dc0SBinbin Zhou static struct dma_async_tx_descriptor *
296*ffee2dc0SBinbin Zhou ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
297*ffee2dc0SBinbin Zhou 		       unsigned int sg_len, enum dma_transfer_direction dir,
298*ffee2dc0SBinbin Zhou 		       unsigned long flags, void *context)
299*ffee2dc0SBinbin Zhou {
300*ffee2dc0SBinbin Zhou 	struct ls1x_dma_desc *desc;
301*ffee2dc0SBinbin Zhou 
302*ffee2dc0SBinbin Zhou 	dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n",
303*ffee2dc0SBinbin Zhou 		sg_len, flags, dmaengine_get_direction_text(dir));
304*ffee2dc0SBinbin Zhou 
305*ffee2dc0SBinbin Zhou 	desc = ls1x_dma_alloc_desc();
306*ffee2dc0SBinbin Zhou 	if (!desc)
307*ffee2dc0SBinbin Zhou 		return NULL;
308*ffee2dc0SBinbin Zhou 
309*ffee2dc0SBinbin Zhou 	if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) {
310*ffee2dc0SBinbin Zhou 		ls1x_dma_free_desc(&desc->vd);
311*ffee2dc0SBinbin Zhou 		return NULL;
312*ffee2dc0SBinbin Zhou 	}
313*ffee2dc0SBinbin Zhou 
314*ffee2dc0SBinbin Zhou 	return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
315*ffee2dc0SBinbin Zhou }
316*ffee2dc0SBinbin Zhou 
317*ffee2dc0SBinbin Zhou static struct dma_async_tx_descriptor *
318*ffee2dc0SBinbin Zhou ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
319*ffee2dc0SBinbin Zhou 			 size_t buf_len, size_t period_len,
320*ffee2dc0SBinbin Zhou 			 enum dma_transfer_direction dir, unsigned long flags)
321*ffee2dc0SBinbin Zhou {
322*ffee2dc0SBinbin Zhou 	struct ls1x_dma_desc *desc;
323*ffee2dc0SBinbin Zhou 	struct scatterlist *sgl;
324*ffee2dc0SBinbin Zhou 	unsigned int sg_len;
325*ffee2dc0SBinbin Zhou 	unsigned int i;
326*ffee2dc0SBinbin Zhou 	int ret;
327*ffee2dc0SBinbin Zhou 
328*ffee2dc0SBinbin Zhou 	dev_dbg(chan2dev(dchan),
329*ffee2dc0SBinbin Zhou 		"buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n",
330*ffee2dc0SBinbin Zhou 		buf_len, period_len, flags, dmaengine_get_direction_text(dir));
331*ffee2dc0SBinbin Zhou 
332*ffee2dc0SBinbin Zhou 	desc = ls1x_dma_alloc_desc();
333*ffee2dc0SBinbin Zhou 	if (!desc)
334*ffee2dc0SBinbin Zhou 		return NULL;
335*ffee2dc0SBinbin Zhou 
336*ffee2dc0SBinbin Zhou 	/* allocate the scatterlist */
337*ffee2dc0SBinbin Zhou 	sg_len = buf_len / period_len;
338*ffee2dc0SBinbin Zhou 	sgl = kmalloc_objs(*sgl, sg_len, GFP_NOWAIT);
339*ffee2dc0SBinbin Zhou 	if (!sgl)
340*ffee2dc0SBinbin Zhou 		return NULL;
341*ffee2dc0SBinbin Zhou 
342*ffee2dc0SBinbin Zhou 	sg_init_table(sgl, sg_len);
343*ffee2dc0SBinbin Zhou 	for (i = 0; i < sg_len; ++i) {
344*ffee2dc0SBinbin Zhou 		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)),
345*ffee2dc0SBinbin Zhou 			    period_len, offset_in_page(buf_addr));
346*ffee2dc0SBinbin Zhou 		sg_dma_address(&sgl[i]) = buf_addr;
347*ffee2dc0SBinbin Zhou 		sg_dma_len(&sgl[i]) = period_len;
348*ffee2dc0SBinbin Zhou 		buf_addr += period_len;
349*ffee2dc0SBinbin Zhou 	}
350*ffee2dc0SBinbin Zhou 
351*ffee2dc0SBinbin Zhou 	ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true);
352*ffee2dc0SBinbin Zhou 	kfree(sgl);
353*ffee2dc0SBinbin Zhou 	if (ret) {
354*ffee2dc0SBinbin Zhou 		ls1x_dma_free_desc(&desc->vd);
355*ffee2dc0SBinbin Zhou 		return NULL;
356*ffee2dc0SBinbin Zhou 	}
357*ffee2dc0SBinbin Zhou 
358*ffee2dc0SBinbin Zhou 	return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
359*ffee2dc0SBinbin Zhou }
360*ffee2dc0SBinbin Zhou 
361*ffee2dc0SBinbin Zhou static int ls1x_dma_slave_config(struct dma_chan *dchan,
362*ffee2dc0SBinbin Zhou 				 struct dma_slave_config *config)
363*ffee2dc0SBinbin Zhou {
364*ffee2dc0SBinbin Zhou 	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
365*ffee2dc0SBinbin Zhou 
366*ffee2dc0SBinbin Zhou 	chan->src_addr = config->src_addr;
367*ffee2dc0SBinbin Zhou 	chan->src_addr_width = config->src_addr_width;
368*ffee2dc0SBinbin Zhou 	chan->dst_addr = config->dst_addr;
369*ffee2dc0SBinbin Zhou 	chan->dst_addr_width = config->dst_addr_width;
370*ffee2dc0SBinbin Zhou 
371*ffee2dc0SBinbin Zhou 	return 0;
372*ffee2dc0SBinbin Zhou }
373*ffee2dc0SBinbin Zhou 
374*ffee2dc0SBinbin Zhou static int ls1x_dma_pause(struct dma_chan *dchan)
375*ffee2dc0SBinbin Zhou {
376*ffee2dc0SBinbin Zhou 	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
377*ffee2dc0SBinbin Zhou 	int ret;
378*ffee2dc0SBinbin Zhou 
379*ffee2dc0SBinbin Zhou 	guard(spinlock_irqsave)(&chan->vc.lock);
380*ffee2dc0SBinbin Zhou 	/* save the current lli */
381*ffee2dc0SBinbin Zhou 	ret = ls1x_dma_query(chan, &chan->curr_lli->phys);
382*ffee2dc0SBinbin Zhou 	if (!ret)
383*ffee2dc0SBinbin Zhou 		ls1x_dma_stop(chan);
384*ffee2dc0SBinbin Zhou 
385*ffee2dc0SBinbin Zhou 	return ret;
386*ffee2dc0SBinbin Zhou }
387*ffee2dc0SBinbin Zhou 
388*ffee2dc0SBinbin Zhou static int ls1x_dma_resume(struct dma_chan *dchan)
389*ffee2dc0SBinbin Zhou {
390*ffee2dc0SBinbin Zhou 	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
391*ffee2dc0SBinbin Zhou 
392*ffee2dc0SBinbin Zhou 	guard(spinlock_irqsave)(&chan->vc.lock);
393*ffee2dc0SBinbin Zhou 
394*ffee2dc0SBinbin Zhou 	return ls1x_dma_start(chan, &chan->curr_lli->phys);
395*ffee2dc0SBinbin Zhou }
396*ffee2dc0SBinbin Zhou 
/*
 * dmaengine device_terminate_all callback: halt the hardware, detach the
 * in-flight descriptor (freed later via vchan_terminate_vdesc), and move
 * all queued descriptors off the channel so they can be freed outside
 * the lock.  Always returns 0.
 */
static int ls1x_dma_terminate_all(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct virt_dma_desc *vd;
	LIST_HEAD(head);

	ls1x_dma_stop(chan);

	scoped_guard(spinlock_irqsave, &chan->vc.lock) {
		vd = vchan_next_desc(&chan->vc);
		if (vd)
			vchan_terminate_vdesc(vd);

		vchan_get_all_descriptors(&chan->vc, &head);
	}

	/* free with the lock dropped; desc_free may sleep-unfriendly work */
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}
417*ffee2dc0SBinbin Zhou 
/* Wait for the virt-dma completion tasklet to finish (device_synchronize). */
static void ls1x_dma_synchronize(struct dma_chan *dchan)
{
	struct virt_dma_chan *vc = to_virt_chan(dchan);

	vchan_synchronize(vc);
}
422*ffee2dc0SBinbin Zhou 
/*
 * dmaengine device_tx_status callback.
 *
 * If the cookie is still in flight, query the controller for its current
 * lli, locate that position in the descriptor's chain, and report the
 * residue as the sum of the remaining lli lengths (converted from
 * bus-width units back to bytes).  If the hardware query fails, the
 * cookie status is returned with no residue set; the early return inside
 * scoped_guard still releases the lock.
 */
static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct virt_dma_desc *vd;
	enum dma_status status;
	size_t bytes = 0;

	status = dma_cookie_status(dchan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	scoped_guard(spinlock_irqsave, &chan->vc.lock) {
		vd = vchan_find_desc(&chan->vc, cookie);
		if (vd) {
			struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
			struct ls1x_dma_lli *lli;
			dma_addr_t next_phys;

			/* get the current lli */
			if (ls1x_dma_query(chan, &chan->curr_lli->phys))
				return status;

			/* locate the current lli */
			/* match by NEXT field: the snapshot's NEXT uniquely
			 * identifies which lli the controller is executing
			 */
			next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT];
			list_for_each_entry(lli, &desc->lli_list, node)
				if (lli->hw[LS1X_DMADESC_NEXT] == next_phys)
					break;

			dev_dbg(chan2dev(dchan), "current lli_phys=%pad",
				&lli->phys);

			/* count the residues */
			list_for_each_entry_from(lli, &desc->lli_list, node)
				bytes += lli->hw[LS1X_DMADESC_LENGTH] *
					 chan->bus_width;
		}
	}

	dma_set_residue(state, bytes);

	return status;
}
467*ffee2dc0SBinbin Zhou 
468*ffee2dc0SBinbin Zhou static void ls1x_dma_issue_pending(struct dma_chan *dchan)
469*ffee2dc0SBinbin Zhou {
470*ffee2dc0SBinbin Zhou 	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
471*ffee2dc0SBinbin Zhou 
472*ffee2dc0SBinbin Zhou 	guard(spinlock_irqsave)(&chan->vc.lock);
473*ffee2dc0SBinbin Zhou 
474*ffee2dc0SBinbin Zhou 	if (vchan_issue_pending(&chan->vc)) {
475*ffee2dc0SBinbin Zhou 		struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
476*ffee2dc0SBinbin Zhou 
477*ffee2dc0SBinbin Zhou 		if (vd) {
478*ffee2dc0SBinbin Zhou 			struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
479*ffee2dc0SBinbin Zhou 			struct ls1x_dma_lli *lli;
480*ffee2dc0SBinbin Zhou 
481*ffee2dc0SBinbin Zhou 			lli = list_first_entry(&desc->lli_list,
482*ffee2dc0SBinbin Zhou 					       struct ls1x_dma_lli, node);
483*ffee2dc0SBinbin Zhou 			ls1x_dma_start(chan, &lli->phys);
484*ffee2dc0SBinbin Zhou 		}
485*ffee2dc0SBinbin Zhou 	}
486*ffee2dc0SBinbin Zhou }
487*ffee2dc0SBinbin Zhou 
/*
 * Per-channel interrupt handler (registered IRQF_SHARED, so IRQ_NONE is
 * returned when no descriptor is active).  Cyclic descriptors stay on
 * the issued list and fire the period callback; one-shot descriptors are
 * dequeued and completed.
 */
static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
{
	struct ls1x_dma_chan *chan = data;
	struct dma_chan *dchan = &chan->vc.chan;
	struct device *dev = chan2dev(dchan);
	struct virt_dma_desc *vd;

	scoped_guard(spinlock, &chan->vc.lock) {
		vd = vchan_next_desc(&chan->vc);
		if (!vd) {
			dev_warn(dev,
				 "IRQ %d with no active desc on channel %d\n",
				 irq, dchan->chan_id);
			return IRQ_NONE;
		}

		if (chan->is_cyclic) {
			vchan_cyclic_callback(vd);
		} else {
			/* one-shot: remove and complete the descriptor */
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
	}

	dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id);

	return IRQ_HANDLED;
}
516*ffee2dc0SBinbin Zhou 
/*
 * Map the (single, shared) register block and initialize each channel:
 * look up its IRQ by the device-tree name "ch<N>" and register it with
 * the virt-dma core.  Returns 0 or a negative errno.
 */
static int ls1x_dma_chan_probe(struct platform_device *pdev,
			       struct ls1x_dma *dma)
{
	void __iomem *reg_base;
	int id;

	reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	for (id = 0; id < dma->nr_chans; id++) {
		struct ls1x_dma_chan *chan = &dma->chan[id];
		char pdev_irqname[16];

		snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id);
		chan->irq = platform_get_irq_byname(pdev, pdev_irqname);
		if (chan->irq < 0)
			return dev_err_probe(&pdev->dev, chan->irq,
					     "failed to get IRQ for ch%d\n",
					     id);

		/* all channels share one control register block */
		chan->reg_base = reg_base;
		chan->vc.desc_free = ls1x_dma_free_desc;
		vchan_init(&chan->vc, &dma->ddev);
	}

	return 0;
}
545*ffee2dc0SBinbin Zhou 
546*ffee2dc0SBinbin Zhou static void ls1x_dma_chan_remove(struct ls1x_dma *dma)
547*ffee2dc0SBinbin Zhou {
548*ffee2dc0SBinbin Zhou 	int id;
549*ffee2dc0SBinbin Zhou 
550*ffee2dc0SBinbin Zhou 	for (id = 0; id < dma->nr_chans; id++) {
551*ffee2dc0SBinbin Zhou 		struct ls1x_dma_chan *chan = &dma->chan[id];
552*ffee2dc0SBinbin Zhou 
553*ffee2dc0SBinbin Zhou 		if (chan->vc.chan.device == &dma->ddev) {
554*ffee2dc0SBinbin Zhou 			list_del(&chan->vc.chan.device_node);
555*ffee2dc0SBinbin Zhou 			tasklet_kill(&chan->vc.task);
556*ffee2dc0SBinbin Zhou 		}
557*ffee2dc0SBinbin Zhou 	}
558*ffee2dc0SBinbin Zhou }
559*ffee2dc0SBinbin Zhou 
/*
 * Probe the controller.  The channel count is taken from the number of
 * IRQs declared for the platform device (one per channel, capped at
 * LS1X_DMA_MAX_CHANNELS), then the dmaengine device is described,
 * channels are set up, and the device is registered with both the
 * dmaengine core and the OF DMA translation layer.
 */
static int ls1x_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	struct ls1x_dma *dma;
	int ret;

	ret = platform_irq_count(pdev);
	if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS)
		return dev_err_probe(dev, -EINVAL,
				     "Invalid number of IRQ channels: %d\n",
				     ret);

	dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;
	dma->nr_chans = ret;

	/* initialize DMA device */
	ddev = &dma->ddev;
	ddev->dev = dev;
	ddev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = ls1x_dma_free_chan_resources;
	ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic;
	ddev->device_config = ls1x_dma_slave_config;
	ddev->device_pause = ls1x_dma_pause;
	ddev->device_resume = ls1x_dma_resume;
	ddev->device_terminate_all = ls1x_dma_terminate_all;
	ddev->device_synchronize = ls1x_dma_synchronize;
	ddev->device_tx_status = ls1x_dma_tx_status;
	ddev->device_issue_pending = ls1x_dma_issue_pending;
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	INIT_LIST_HEAD(&ddev->channels);

	/* initialize DMA channels */
	ret = ls1x_dma_chan_probe(pdev, dma);
	if (ret)
		goto err;

	/* managed registration: unregistered automatically on driver detach */
	ret = dmaenginem_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "failed to register DMA device\n");
		goto err;
	}

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
					 ddev);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto err;
	}

	platform_set_drvdata(pdev, dma);
	dev_info(dev, "Loongson1 DMA driver registered\n");

	return 0;

err:
	/* tear down whatever channels were attached before the failure */
	ls1x_dma_chan_remove(dma);

	return ret;
}
632*ffee2dc0SBinbin Zhou 
/*
 * Driver removal: unregister from the OF DMA layer, then detach the
 * channels.  The dmaengine device itself was registered with the
 * managed dmaenginem_* API and needs no explicit unregistration here.
 */
static void ls1x_dma_remove(struct platform_device *pdev)
{
	struct ls1x_dma *dma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	ls1x_dma_chan_remove(dma);
}
640*ffee2dc0SBinbin Zhou 
/* Device-tree match table: Loongson-1B APB DMA controller. */
static const struct of_device_id ls1x_dma_match[] = {
	{ .compatible = "loongson,ls1b-apbdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_dma_match);
646*ffee2dc0SBinbin Zhou 
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver ls1x_dma_driver = {
	.probe = ls1x_dma_probe,
	.remove = ls1x_dma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = ls1x_dma_match,
	},
};
655*ffee2dc0SBinbin Zhou 
656*ffee2dc0SBinbin Zhou module_platform_driver(ls1x_dma_driver);
657*ffee2dc0SBinbin Zhou 
658*ffee2dc0SBinbin Zhou MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
659*ffee2dc0SBinbin Zhou MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver");
660*ffee2dc0SBinbin Zhou MODULE_LICENSE("GPL");
661