xref: /linux/drivers/dma/loongson/loongson2-apb-dma.c (revision d662a710c668a86a39ebaad334d9960a0cc776c2)
1ffee2dc0SBinbin Zhou // SPDX-License-Identifier: GPL-2.0-or-later
2ffee2dc0SBinbin Zhou /*
3ffee2dc0SBinbin Zhou  * Driver for the Loongson-2 APB DMA Controller
4ffee2dc0SBinbin Zhou  *
5ffee2dc0SBinbin Zhou  * Copyright (C) 2017-2023 Loongson Corporation
6ffee2dc0SBinbin Zhou  */
7ffee2dc0SBinbin Zhou 
8ffee2dc0SBinbin Zhou #include <linux/clk.h>
9ffee2dc0SBinbin Zhou #include <linux/dma-mapping.h>
10ffee2dc0SBinbin Zhou #include <linux/dmapool.h>
11ffee2dc0SBinbin Zhou #include <linux/interrupt.h>
12ffee2dc0SBinbin Zhou #include <linux/io.h>
13ffee2dc0SBinbin Zhou #include <linux/io-64-nonatomic-lo-hi.h>
14ffee2dc0SBinbin Zhou #include <linux/module.h>
15ffee2dc0SBinbin Zhou #include <linux/of.h>
16ffee2dc0SBinbin Zhou #include <linux/of_dma.h>
17ffee2dc0SBinbin Zhou #include <linux/platform_device.h>
18ffee2dc0SBinbin Zhou #include <linux/slab.h>
19ffee2dc0SBinbin Zhou 
20ffee2dc0SBinbin Zhou #include "../dmaengine.h"
21ffee2dc0SBinbin Zhou #include "../virt-dma.h"
22ffee2dc0SBinbin Zhou 
/*
 * Global Configuration Register.
 *
 * Accessed as a 64-bit quantity via lo_hi_readq()/lo_hi_writeq(); the low
 * bits (LDMA_CONFIG_MASK) are control flags, while the remaining bits carry
 * the DMA address of the first hardware descriptor (see
 * ls2x_dma_start_transfer()).
 */
#define LDMA_ORDER_ERG		0x0

/* Bitfield definitions */

/* Bitfields in Global Configuration Register */
#define LDMA_64BIT_EN		BIT(0) /* 1: 64 bit support */
#define LDMA_UNCOHERENT_EN	BIT(1) /* 0: cache, 1: uncache */
#define LDMA_ASK_VALID		BIT(2) /* not used by this driver -- TODO confirm semantics */
#define LDMA_START		BIT(3) /* DMA start operation */
#define LDMA_STOP		BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK	GENMASK_ULL(4, 0) /* DMA controller config bits mask */

/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN		BIT(0) /* 1: the next descriptor is valid */
#define LDMA_DESC_ADDR_LOW	GENMASK(31, 1)

/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT		BIT(1) /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION	BIT(12) /* 1: write to device, 0: read from device */

/* Bus widths the controller supports (4- and 8-byte words) */
#define LDMA_SLAVE_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

/* Fallback burst length when no maxburst is configured (see ls2x_dmac_detect_burst()) */
#define LDMA_MAX_TRANS_LEN	U32_MAX
49ffee2dc0SBinbin Zhou /*--  descriptors  -----------------------------------------------------*/
50ffee2dc0SBinbin Zhou 
/*
 * struct ls2x_dma_hw_desc - DMA HW descriptor
 * @ndesc_addr: the next descriptor low address (bit 0 is LDMA_DESC_EN).
 * @mem_addr: memory low address.
 * @apb_addr: device buffer address.
 * @len: length of a piece of carried content, in words.
 * @step_len: length between two moved memory data blocks.
 * @step_times: number of blocks to be carried in a single DMA operation.
 * @cmd: descriptor command or state.
 * @stats: DMA status.
 * @high_ndesc_addr: the next descriptor high address.
 * @high_mem_addr: memory high address.
 * @reserved: reserved
 *
 * This layout is consumed directly by the DMA engine (the descriptor's DMA
 * address is programmed into LDMA_ORDER_ERG), so the field order must not
 * change -- hence __packed.
 */
struct ls2x_dma_hw_desc {
	u32 ndesc_addr;
	u32 mem_addr;
	u32 apb_addr;
	u32 len;
	u32 step_len;
	u32 step_times;
	u32 cmd;
	u32 stats;
	u32 high_ndesc_addr;
	u32 high_mem_addr;
	u32 reserved[2];
} __packed;
78ffee2dc0SBinbin Zhou 
/*
 * struct ls2x_dma_sg - ls2x dma scatter gather entry
 * @hw: the pointer to DMA HW descriptor (allocated from the channel's pool).
 * @llp: physical (DMA) address of the DMA HW descriptor.
 * @phys: destination or source address (memory side) of this segment.
 * @len: segment length in bytes (sg_dma_len() or the cyclic period length).
 */
struct ls2x_dma_sg {
	struct ls2x_dma_hw_desc	*hw;
	dma_addr_t		llp;
	dma_addr_t		phys;
	u32			len;
};
92ffee2dc0SBinbin Zhou 
/*
 * struct ls2x_dma_desc - software descriptor
 * @vdesc: pointer to the virtual dma descriptor.
 * @cyclic: true when this descriptor describes a cyclic (ring) transfer.
 * @burst_size: burst size of transaction, in words.
 * @desc_num: number of valid entries in @sg.
 * @direction: transfer direction, to or from device.
 * @status: dma controller status (DMA_IN_PROGRESS/DMA_PAUSED/DMA_COMPLETE).
 * @sg: flexible array of segments, sized at allocation time.
 */
struct ls2x_dma_desc {
	struct virt_dma_desc		vdesc;
	bool				cyclic;
	size_t				burst_size;
	u32				desc_num;
	enum dma_transfer_direction	direction;
	enum dma_status			status;
	struct ls2x_dma_sg		sg[] __counted_by(desc_num);
};
112ffee2dc0SBinbin Zhou 
113ffee2dc0SBinbin Zhou /*--  Channels  --------------------------------------------------------*/
114ffee2dc0SBinbin Zhou 
/*
 * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
 * @vchan: virtual dma channel entry.
 * @desc: descriptor currently programmed into the hardware, NULL when idle.
 * @pool: dma_pool the HW descriptors are allocated from (created in
 *        ls2x_dma_alloc_chan_resources()).
 * @irq: irq line
 * @sconfig: configuration for slave transfers, passed via .device_config
 */
struct ls2x_dma_chan {
	struct virt_dma_chan	vchan;
	struct ls2x_dma_desc	*desc;
	void			*pool;
	int			irq;
	struct dma_slave_config	sconfig;
};
130ffee2dc0SBinbin Zhou 
131ffee2dc0SBinbin Zhou /*--  Controller  ------------------------------------------------------*/
132ffee2dc0SBinbin Zhou 
/*
 * struct ls2x_dma_priv - LS2X APB DMAC specific information
 * @ddev: dmaengine dma_device object members
 * @dma_clk: DMAC clock source
 * @regs: memory mapped register base
 * @lchan: the controller's single channel (embedded, not allocated)
 */
struct ls2x_dma_priv {
	struct dma_device	ddev;
	struct clk		*dma_clk;
	void __iomem		*regs;
	struct ls2x_dma_chan	lchan;
};
146ffee2dc0SBinbin Zhou 
147ffee2dc0SBinbin Zhou /*--  Helper functions  ------------------------------------------------*/
148ffee2dc0SBinbin Zhou 
/* Map a virt_dma_desc back to its containing ls2x software descriptor. */
static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct ls2x_dma_desc, vdesc);
}
153ffee2dc0SBinbin Zhou 
/* Map a dma_chan back to its containing ls2x channel. */
static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ls2x_dma_chan, vchan.chan);
}
158ffee2dc0SBinbin Zhou 
/* Map a dma_device back to its containing controller state. */
static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
{
	return container_of(ddev, struct ls2x_dma_priv, ddev);
}
163ffee2dc0SBinbin Zhou 
/* Device to use for channel-scoped prints and the dma_pool name. */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
168ffee2dc0SBinbin Zhou 
169ffee2dc0SBinbin Zhou static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
170ffee2dc0SBinbin Zhou {
171ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
172ffee2dc0SBinbin Zhou 	struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
173ffee2dc0SBinbin Zhou 	int i;
174ffee2dc0SBinbin Zhou 
175ffee2dc0SBinbin Zhou 	for (i = 0; i < desc->desc_num; i++) {
176ffee2dc0SBinbin Zhou 		if (desc->sg[i].hw)
177ffee2dc0SBinbin Zhou 			dma_pool_free(lchan->pool, desc->sg[i].hw,
178ffee2dc0SBinbin Zhou 				      desc->sg[i].llp);
179ffee2dc0SBinbin Zhou 	}
180ffee2dc0SBinbin Zhou 
181ffee2dc0SBinbin Zhou 	kfree(desc);
182ffee2dc0SBinbin Zhou }
183ffee2dc0SBinbin Zhou 
/*
 * ls2x_dma_write_cmd - read-modify-write the control bits of LDMA_ORDER_ERG
 * @lchan: channel whose controller register is updated
 * @cmd: command value OR'ed into the cleared config bits
 *
 * NOTE(review): @cmd is declared bool, so callers passing LDMA_START (BIT(3))
 * or LDMA_STOP (BIT(4)) both collapse to 1 before the OR below, which is the
 * LDMA_64BIT_EN bit position -- confirm whether the distinct START/STOP bits
 * were intended to reach the register.
 */
static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
{
	struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
	u64 val;

	/* Preserve the descriptor-address bits, clear only the config bits */
	val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
	val |= LDMA_64BIT_EN | cmd;
	lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}
193ffee2dc0SBinbin Zhou 
/*
 * ls2x_dma_start_transfer - program the next queued descriptor into hardware
 * @lchan: channel to (re)start
 *
 * Pops the next virt-dma descriptor off the issued list and writes the DMA
 * address of its first HW descriptor, plus the START flag, into the global
 * configuration register. Caller must hold lchan->vchan.lock (see
 * ls2x_dma_issue_pending() and ls2x_dma_isr()).
 */
static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
{
	struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
	struct ls2x_dma_sg *ldma_sg;
	struct virt_dma_desc *vdesc;
	u64 val;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&lchan->vchan);
	if (!vdesc) {
		/* Nothing pending: mark the channel idle */
		lchan->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	lchan->desc = to_ldma_desc(vdesc);
	ldma_sg = &lchan->desc->sg[0];

	/*
	 * Start DMA: clear the register first, then write the first HW
	 * descriptor's address (its low config bits masked off) together
	 * with the 64-bit-enable and START flags.
	 */
	lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
	val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
	lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}
217ffee2dc0SBinbin Zhou 
/*
 * ls2x_dmac_detect_burst - derive the burst length, in 32-bit words, from
 * the cached slave configuration.
 * @lchan: channel whose sconfig is consulted
 *
 * Return: burst length in words; LDMA_MAX_TRANS_LEN when no maxburst was
 * configured; 0 to reject the configuration.
 *
 * NOTE(review): the "reject" test below ANDs the enum bus-width values
 * (4 or 8) against LDMA_SLAVE_BUSWIDTHS (BIT(4)|BIT(8) == 0x110); both
 * 4 & 0x110 and 8 & 0x110 are 0, so the condition can never be true and
 * nothing is ever rejected. Was `BIT(width) & LDMA_SLAVE_BUSWIDTHS`
 * intended? TODO confirm against hardware/dmaengine expectations.
 */
static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
{
	u32 maxburst, buswidth;

	/* Reject definitely invalid configurations */
	if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
	    (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
		return 0;

	if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
		maxburst = lchan->sconfig.dst_maxburst;
		buswidth = lchan->sconfig.dst_addr_width;
	} else {
		maxburst = lchan->sconfig.src_maxburst;
		buswidth = lchan->sconfig.src_addr_width;
	}

	/* If maxburst is zero, fallback to LDMA_MAX_TRANS_LEN */
	return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
}
238ffee2dc0SBinbin Zhou 
239ffee2dc0SBinbin Zhou static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
240ffee2dc0SBinbin Zhou 			       struct ls2x_dma_desc *desc)
241ffee2dc0SBinbin Zhou {
242ffee2dc0SBinbin Zhou 	struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
243ffee2dc0SBinbin Zhou 	u32 num_segments, segment_size;
244ffee2dc0SBinbin Zhou 
245ffee2dc0SBinbin Zhou 	if (desc->direction == DMA_MEM_TO_DEV) {
246ffee2dc0SBinbin Zhou 		ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
247ffee2dc0SBinbin Zhou 		ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
248ffee2dc0SBinbin Zhou 	} else {
249ffee2dc0SBinbin Zhou 		ldma_sg->hw->cmd = LDMA_INT;
250ffee2dc0SBinbin Zhou 		ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
251ffee2dc0SBinbin Zhou 	}
252ffee2dc0SBinbin Zhou 
253ffee2dc0SBinbin Zhou 	ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
254ffee2dc0SBinbin Zhou 	ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
255ffee2dc0SBinbin Zhou 
256ffee2dc0SBinbin Zhou 	/* Split into multiple equally sized segments if necessary */
257ffee2dc0SBinbin Zhou 	num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
258ffee2dc0SBinbin Zhou 	segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
259ffee2dc0SBinbin Zhou 
260ffee2dc0SBinbin Zhou 	/* Word count register takes input in words */
261ffee2dc0SBinbin Zhou 	ldma_sg->hw->len = segment_size;
262ffee2dc0SBinbin Zhou 	ldma_sg->hw->step_times = num_segments;
263ffee2dc0SBinbin Zhou 	ldma_sg->hw->step_len = 0;
264ffee2dc0SBinbin Zhou 
265ffee2dc0SBinbin Zhou 	/* lets make a link list */
266ffee2dc0SBinbin Zhou 	if (sg_index) {
267ffee2dc0SBinbin Zhou 		desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
268ffee2dc0SBinbin Zhou 		desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
269ffee2dc0SBinbin Zhou 	}
270ffee2dc0SBinbin Zhou }
271ffee2dc0SBinbin Zhou 
272ffee2dc0SBinbin Zhou /*--  DMA Engine API  --------------------------------------------------*/
273ffee2dc0SBinbin Zhou 
274ffee2dc0SBinbin Zhou /*
275ffee2dc0SBinbin Zhou  * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
276ffee2dc0SBinbin Zhou  * @chan: allocate descriptor resources for this channel
277ffee2dc0SBinbin Zhou  *
278ffee2dc0SBinbin Zhou  * return - the number of allocated descriptors
279ffee2dc0SBinbin Zhou  */
280ffee2dc0SBinbin Zhou static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
281ffee2dc0SBinbin Zhou {
282ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
283ffee2dc0SBinbin Zhou 
284ffee2dc0SBinbin Zhou 	/* Create a pool of consistent memory blocks for hardware descriptors */
285ffee2dc0SBinbin Zhou 	lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
286ffee2dc0SBinbin Zhou 				      chan->device->dev, PAGE_SIZE,
287ffee2dc0SBinbin Zhou 				      __alignof__(struct ls2x_dma_hw_desc), 0);
288ffee2dc0SBinbin Zhou 	if (!lchan->pool) {
289ffee2dc0SBinbin Zhou 		dev_err(chan2dev(chan), "No memory for descriptors\n");
290ffee2dc0SBinbin Zhou 		return -ENOMEM;
291ffee2dc0SBinbin Zhou 	}
292ffee2dc0SBinbin Zhou 
293ffee2dc0SBinbin Zhou 	return 1;
294ffee2dc0SBinbin Zhou }
295ffee2dc0SBinbin Zhou 
296ffee2dc0SBinbin Zhou /*
297ffee2dc0SBinbin Zhou  * ls2x_dma_free_chan_resources - free all channel resources
298ffee2dc0SBinbin Zhou  * @chan: DMA channel
299ffee2dc0SBinbin Zhou  */
300ffee2dc0SBinbin Zhou static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
301ffee2dc0SBinbin Zhou {
302ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
303ffee2dc0SBinbin Zhou 
304ffee2dc0SBinbin Zhou 	vchan_free_chan_resources(to_virt_chan(chan));
305ffee2dc0SBinbin Zhou 	dma_pool_destroy(lchan->pool);
306ffee2dc0SBinbin Zhou 	lchan->pool = NULL;
307ffee2dc0SBinbin Zhou }
308ffee2dc0SBinbin Zhou 
309ffee2dc0SBinbin Zhou /*
310ffee2dc0SBinbin Zhou  * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
311ffee2dc0SBinbin Zhou  * @chan: DMA channel
312ffee2dc0SBinbin Zhou  * @sgl: scatterlist to transfer to/from
313ffee2dc0SBinbin Zhou  * @sg_len: number of entries in @scatterlist
314ffee2dc0SBinbin Zhou  * @direction: DMA direction
315ffee2dc0SBinbin Zhou  * @flags: tx descriptor status flags
316ffee2dc0SBinbin Zhou  * @context: transaction context (ignored)
317ffee2dc0SBinbin Zhou  *
318ffee2dc0SBinbin Zhou  * Return: Async transaction descriptor on success and NULL on failure
319ffee2dc0SBinbin Zhou  */
320ffee2dc0SBinbin Zhou static struct dma_async_tx_descriptor *
321ffee2dc0SBinbin Zhou ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
322ffee2dc0SBinbin Zhou 		       u32 sg_len, enum dma_transfer_direction direction,
323ffee2dc0SBinbin Zhou 		       unsigned long flags, void *context)
324ffee2dc0SBinbin Zhou {
325ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
326ffee2dc0SBinbin Zhou 	struct ls2x_dma_desc *desc;
327ffee2dc0SBinbin Zhou 	struct scatterlist *sg;
328ffee2dc0SBinbin Zhou 	size_t burst_size;
329ffee2dc0SBinbin Zhou 	int i;
330ffee2dc0SBinbin Zhou 
331ffee2dc0SBinbin Zhou 	if (unlikely(!sg_len || !is_slave_direction(direction)))
332ffee2dc0SBinbin Zhou 		return NULL;
333ffee2dc0SBinbin Zhou 
334ffee2dc0SBinbin Zhou 	burst_size = ls2x_dmac_detect_burst(lchan);
335ffee2dc0SBinbin Zhou 	if (!burst_size)
336ffee2dc0SBinbin Zhou 		return NULL;
337ffee2dc0SBinbin Zhou 
338ffee2dc0SBinbin Zhou 	desc = kzalloc_flex(*desc, sg, sg_len, GFP_NOWAIT);
339ffee2dc0SBinbin Zhou 	if (!desc)
340ffee2dc0SBinbin Zhou 		return NULL;
341ffee2dc0SBinbin Zhou 
342ffee2dc0SBinbin Zhou 	desc->desc_num = sg_len;
343ffee2dc0SBinbin Zhou 	desc->direction = direction;
344ffee2dc0SBinbin Zhou 	desc->burst_size = burst_size;
345ffee2dc0SBinbin Zhou 
346ffee2dc0SBinbin Zhou 	for_each_sg(sgl, sg, sg_len, i) {
347ffee2dc0SBinbin Zhou 		struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
348ffee2dc0SBinbin Zhou 
349ffee2dc0SBinbin Zhou 		/* Allocate DMA capable memory for hardware descriptor */
350ffee2dc0SBinbin Zhou 		ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
351ffee2dc0SBinbin Zhou 		if (!ldma_sg->hw) {
352ffee2dc0SBinbin Zhou 			desc->desc_num = i;
353ffee2dc0SBinbin Zhou 			ls2x_dma_desc_free(&desc->vdesc);
354ffee2dc0SBinbin Zhou 			return NULL;
355ffee2dc0SBinbin Zhou 		}
356ffee2dc0SBinbin Zhou 
357ffee2dc0SBinbin Zhou 		ldma_sg->phys = sg_dma_address(sg);
358ffee2dc0SBinbin Zhou 		ldma_sg->len = sg_dma_len(sg);
359ffee2dc0SBinbin Zhou 
360ffee2dc0SBinbin Zhou 		ls2x_dma_fill_desc(lchan, i, desc);
361ffee2dc0SBinbin Zhou 	}
362ffee2dc0SBinbin Zhou 
363ffee2dc0SBinbin Zhou 	/* Setting the last descriptor enable bit */
364ffee2dc0SBinbin Zhou 	desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
365ffee2dc0SBinbin Zhou 	desc->status = DMA_IN_PROGRESS;
366ffee2dc0SBinbin Zhou 
367ffee2dc0SBinbin Zhou 	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
368ffee2dc0SBinbin Zhou }
369ffee2dc0SBinbin Zhou 
370ffee2dc0SBinbin Zhou /*
371ffee2dc0SBinbin Zhou  * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
372ffee2dc0SBinbin Zhou  * @chan: the DMA channel to prepare
373ffee2dc0SBinbin Zhou  * @buf_addr: physical DMA address where the buffer starts
374ffee2dc0SBinbin Zhou  * @buf_len: total number of bytes for the entire buffer
375ffee2dc0SBinbin Zhou  * @period_len: number of bytes for each period
376ffee2dc0SBinbin Zhou  * @direction: transfer direction, to or from device
377ffee2dc0SBinbin Zhou  * @flags: tx descriptor status flags
378ffee2dc0SBinbin Zhou  *
379ffee2dc0SBinbin Zhou  * Return: Async transaction descriptor on success and NULL on failure
380ffee2dc0SBinbin Zhou  */
381ffee2dc0SBinbin Zhou static struct dma_async_tx_descriptor *
382ffee2dc0SBinbin Zhou ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
383ffee2dc0SBinbin Zhou 			 size_t period_len, enum dma_transfer_direction direction,
384ffee2dc0SBinbin Zhou 			 unsigned long flags)
385ffee2dc0SBinbin Zhou {
386ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
387ffee2dc0SBinbin Zhou 	struct ls2x_dma_desc *desc;
388ffee2dc0SBinbin Zhou 	size_t burst_size;
389ffee2dc0SBinbin Zhou 	u32 num_periods;
390ffee2dc0SBinbin Zhou 	int i;
391ffee2dc0SBinbin Zhou 
392ffee2dc0SBinbin Zhou 	if (unlikely(!buf_len || !period_len))
393ffee2dc0SBinbin Zhou 		return NULL;
394ffee2dc0SBinbin Zhou 
395ffee2dc0SBinbin Zhou 	if (unlikely(!is_slave_direction(direction)))
396ffee2dc0SBinbin Zhou 		return NULL;
397ffee2dc0SBinbin Zhou 
398ffee2dc0SBinbin Zhou 	burst_size = ls2x_dmac_detect_burst(lchan);
399ffee2dc0SBinbin Zhou 	if (!burst_size)
400ffee2dc0SBinbin Zhou 		return NULL;
401ffee2dc0SBinbin Zhou 
402ffee2dc0SBinbin Zhou 	num_periods = buf_len / period_len;
403ffee2dc0SBinbin Zhou 	desc = kzalloc_flex(*desc, sg, num_periods, GFP_NOWAIT);
404ffee2dc0SBinbin Zhou 	if (!desc)
405ffee2dc0SBinbin Zhou 		return NULL;
406ffee2dc0SBinbin Zhou 
407ffee2dc0SBinbin Zhou 	desc->desc_num = num_periods;
408ffee2dc0SBinbin Zhou 	desc->direction = direction;
409ffee2dc0SBinbin Zhou 	desc->burst_size = burst_size;
410ffee2dc0SBinbin Zhou 
411ffee2dc0SBinbin Zhou 	/* Build cyclic linked list */
412ffee2dc0SBinbin Zhou 	for (i = 0; i < num_periods; i++) {
413ffee2dc0SBinbin Zhou 		struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
414ffee2dc0SBinbin Zhou 
415ffee2dc0SBinbin Zhou 		/* Allocate DMA capable memory for hardware descriptor */
416ffee2dc0SBinbin Zhou 		ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
417ffee2dc0SBinbin Zhou 		if (!ldma_sg->hw) {
418ffee2dc0SBinbin Zhou 			desc->desc_num = i;
419ffee2dc0SBinbin Zhou 			ls2x_dma_desc_free(&desc->vdesc);
420ffee2dc0SBinbin Zhou 			return NULL;
421ffee2dc0SBinbin Zhou 		}
422ffee2dc0SBinbin Zhou 
423ffee2dc0SBinbin Zhou 		ldma_sg->phys = buf_addr + period_len * i;
424ffee2dc0SBinbin Zhou 		ldma_sg->len = period_len;
425ffee2dc0SBinbin Zhou 
426ffee2dc0SBinbin Zhou 		ls2x_dma_fill_desc(lchan, i, desc);
427ffee2dc0SBinbin Zhou 	}
428ffee2dc0SBinbin Zhou 
429ffee2dc0SBinbin Zhou 	/* Lets make a cyclic list */
430ffee2dc0SBinbin Zhou 	desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
431ffee2dc0SBinbin Zhou 	desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
432ffee2dc0SBinbin Zhou 	desc->cyclic = true;
433ffee2dc0SBinbin Zhou 	desc->status = DMA_IN_PROGRESS;
434ffee2dc0SBinbin Zhou 
435ffee2dc0SBinbin Zhou 	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
436ffee2dc0SBinbin Zhou }
437ffee2dc0SBinbin Zhou 
438ffee2dc0SBinbin Zhou /*
439ffee2dc0SBinbin Zhou  * ls2x_slave_config - set slave configuration for channel
440ffee2dc0SBinbin Zhou  * @chan: dma channel
441ffee2dc0SBinbin Zhou  * @cfg: slave configuration
442ffee2dc0SBinbin Zhou  *
443ffee2dc0SBinbin Zhou  * Sets slave configuration for channel
444ffee2dc0SBinbin Zhou  */
445ffee2dc0SBinbin Zhou static int ls2x_dma_slave_config(struct dma_chan *chan,
446ffee2dc0SBinbin Zhou 				 struct dma_slave_config *config)
447ffee2dc0SBinbin Zhou {
448ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
449ffee2dc0SBinbin Zhou 
450ffee2dc0SBinbin Zhou 	memcpy(&lchan->sconfig, config, sizeof(*config));
451ffee2dc0SBinbin Zhou 	return 0;
452ffee2dc0SBinbin Zhou }
453ffee2dc0SBinbin Zhou 
454ffee2dc0SBinbin Zhou /*
455ffee2dc0SBinbin Zhou  * ls2x_dma_issue_pending - push pending transactions to the hardware
456ffee2dc0SBinbin Zhou  * @chan: channel
457ffee2dc0SBinbin Zhou  *
458ffee2dc0SBinbin Zhou  * When this function is called, all pending transactions are pushed to the
459ffee2dc0SBinbin Zhou  * hardware and executed.
460ffee2dc0SBinbin Zhou  */
static void ls2x_dma_issue_pending(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	/* Scope-based lock: released automatically at function exit */
	guard(spinlock_irqsave)(&lchan->vchan.lock);

	/* Kick the hardware only if it is idle (lchan->desc == NULL) */
	if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
		ls2x_dma_start_transfer(lchan);
}
470ffee2dc0SBinbin Zhou 
471ffee2dc0SBinbin Zhou /*
472ffee2dc0SBinbin Zhou  * ls2x_dma_terminate_all - terminate all transactions
473ffee2dc0SBinbin Zhou  * @chan: channel
474ffee2dc0SBinbin Zhou  *
475ffee2dc0SBinbin Zhou  * Stops all DMA transactions.
476ffee2dc0SBinbin Zhou  */
static int ls2x_dma_terminate_all(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	LIST_HEAD(head);

	scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
		/* Setting stop cmd */
		ls2x_dma_write_cmd(lchan, LDMA_STOP);
		if (lchan->desc) {
			/* Hand the in-flight descriptor back to virt-dma for freeing */
			vchan_terminate_vdesc(&lchan->desc->vdesc);
			lchan->desc = NULL;
		}

		/* Collect everything still queued so it can be freed below */
		vchan_get_all_descriptors(&lchan->vchan, &head);
	}

	/* Free the collected descriptors outside the spinlock */
	vchan_dma_desc_free_list(&lchan->vchan, &head);
	return 0;
}
496ffee2dc0SBinbin Zhou 
497ffee2dc0SBinbin Zhou /*
498ffee2dc0SBinbin Zhou  * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
499ffee2dc0SBinbin Zhou  * current context.
500ffee2dc0SBinbin Zhou  * @chan: channel
501ffee2dc0SBinbin Zhou  */
502ffee2dc0SBinbin Zhou static void ls2x_dma_synchronize(struct dma_chan *chan)
503ffee2dc0SBinbin Zhou {
504ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
505ffee2dc0SBinbin Zhou 
506ffee2dc0SBinbin Zhou 	vchan_synchronize(&lchan->vchan);
507ffee2dc0SBinbin Zhou }
508ffee2dc0SBinbin Zhou 
/*
 * ls2x_dma_pause - pause the currently running descriptor
 * @chan: channel
 *
 * Only a descriptor in DMA_IN_PROGRESS can be paused; it is marked
 * DMA_PAUSED so that ls2x_dma_resume() can restart it.
 */
static int ls2x_dma_pause(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	/* Scope-based lock: released automatically at function exit */
	guard(spinlock_irqsave)(&lchan->vchan.lock);

	if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
		ls2x_dma_write_cmd(lchan, LDMA_STOP);
		lchan->desc->status = DMA_PAUSED;
	}

	return 0;
}
522ffee2dc0SBinbin Zhou 
/*
 * ls2x_dma_resume - resume a descriptor previously paused by ls2x_dma_pause()
 * @chan: channel
 *
 * Restores the descriptor state to DMA_IN_PROGRESS before restarting the
 * hardware, mirroring the order used by ls2x_dma_pause().
 */
static int ls2x_dma_resume(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	/* Scope-based lock: released automatically at function exit */
	guard(spinlock_irqsave)(&lchan->vchan.lock);

	if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
		lchan->desc->status = DMA_IN_PROGRESS;
		ls2x_dma_write_cmd(lchan, LDMA_START);
	}

	return 0;
}
536ffee2dc0SBinbin Zhou 
537ffee2dc0SBinbin Zhou /*
538ffee2dc0SBinbin Zhou  * ls2x_dma_isr - LS2X DMA Interrupt handler
539ffee2dc0SBinbin Zhou  * @irq: IRQ number
540ffee2dc0SBinbin Zhou  * @dev_id: Pointer to ls2x_dma_chan
541ffee2dc0SBinbin Zhou  *
542ffee2dc0SBinbin Zhou  * Return: IRQ_HANDLED/IRQ_NONE
 * Return: always IRQ_HANDLED (the handler unconditionally claims the IRQ)
static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
{
	struct ls2x_dma_chan *lchan = dev_id;
	struct ls2x_dma_desc *desc;

	/* Plain spinlock guard: this already runs in hard-irq context */
	scoped_guard(spinlock, &lchan->vchan.lock) {
		desc = lchan->desc;
		if (desc) {
			if (desc->cyclic) {
				/* Cyclic transfers keep running; just notify */
				vchan_cyclic_callback(&desc->vdesc);
			} else {
				/* Complete this descriptor and chain the next one */
				desc->status = DMA_COMPLETE;
				vchan_cookie_complete(&desc->vdesc);
				ls2x_dma_start_transfer(lchan);
			}

			/* ls2x_dma_start_transfer() updates lchan->desc */
			if (!lchan->desc)
				ls2x_dma_write_cmd(lchan, LDMA_STOP);
		}
	}

	return IRQ_HANDLED;
}
568ffee2dc0SBinbin Zhou 
569ffee2dc0SBinbin Zhou static int ls2x_dma_chan_init(struct platform_device *pdev,
570ffee2dc0SBinbin Zhou 			      struct ls2x_dma_priv *priv)
571ffee2dc0SBinbin Zhou {
572ffee2dc0SBinbin Zhou 	struct ls2x_dma_chan *lchan = &priv->lchan;
573ffee2dc0SBinbin Zhou 	struct device *dev = &pdev->dev;
574ffee2dc0SBinbin Zhou 	int ret;
575ffee2dc0SBinbin Zhou 
576ffee2dc0SBinbin Zhou 	lchan->irq = platform_get_irq(pdev, 0);
577ffee2dc0SBinbin Zhou 	if (lchan->irq < 0)
578ffee2dc0SBinbin Zhou 		return lchan->irq;
579ffee2dc0SBinbin Zhou 
580ffee2dc0SBinbin Zhou 	ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
581ffee2dc0SBinbin Zhou 			       dev_name(&pdev->dev), lchan);
582ffee2dc0SBinbin Zhou 	if (ret)
583ffee2dc0SBinbin Zhou 		return ret;
584ffee2dc0SBinbin Zhou 
585ffee2dc0SBinbin Zhou 	/* Initialize channels related values */
586ffee2dc0SBinbin Zhou 	INIT_LIST_HEAD(&priv->ddev.channels);
587ffee2dc0SBinbin Zhou 	lchan->vchan.desc_free = ls2x_dma_desc_free;
588ffee2dc0SBinbin Zhou 	vchan_init(&lchan->vchan, &priv->ddev);
589ffee2dc0SBinbin Zhou 
590ffee2dc0SBinbin Zhou 	return 0;
591ffee2dc0SBinbin Zhou }
592ffee2dc0SBinbin Zhou 
593ffee2dc0SBinbin Zhou /*
594ffee2dc0SBinbin Zhou  * ls2x_dma_probe - Driver probe function
595ffee2dc0SBinbin Zhou  * @pdev: Pointer to the platform_device structure
596ffee2dc0SBinbin Zhou  *
597ffee2dc0SBinbin Zhou  * Return: '0' on success and failure value on error
598ffee2dc0SBinbin Zhou  */
static int ls2x_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ls2x_dma_priv *priv;
	struct dma_device *ddev;
	int ret;

	/* All resources below are devm-/dmaenginem-managed: error paths
	 * simply return and need no explicit unwind. */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return dev_err_probe(dev, PTR_ERR(priv->regs),
				     "devm_platform_ioremap_resource failed.\n");

	/* Clock stays enabled for the whole device lifetime */
	priv->dma_clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(priv->dma_clk))
		return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "Couldn't start the clock.\n");

	ret = ls2x_dma_chan_init(pdev, priv);
	if (ret)
		return ret;

	/* Advertise slave and cyclic capabilities on the single channel */
	ddev = &priv->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);

	ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = ls2x_dma_issue_pending;
	ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
	ddev->device_config = ls2x_dma_slave_config;
	ddev->device_terminate_all = ls2x_dma_terminate_all;
	ddev->device_synchronize = ls2x_dma_synchronize;
	ddev->device_pause = ls2x_dma_pause;
	ddev->device_resume = ls2x_dma_resume;

	ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
	ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	/* Managed registration: unregistered automatically on driver detach */
	ret = dmaenginem_async_device_register(&priv->ddev);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to register DMA engine device.\n");

	/* The OF registration is the only thing ls2x_dma_remove() must undo */
	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to register dma controller.\n");

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
	return 0;
}
658ffee2dc0SBinbin Zhou 
659ffee2dc0SBinbin Zhou /*
660ffee2dc0SBinbin Zhou  * ls2x_dma_remove - Driver remove function
661ffee2dc0SBinbin Zhou  * @pdev: Pointer to the platform_device structure
662ffee2dc0SBinbin Zhou  */
static void ls2x_dma_remove(struct platform_device *pdev)
{
	/* Undo of_dma_controller_register(); everything else is devm-managed */
	of_dma_controller_free(pdev->dev.of_node);
}
667ffee2dc0SBinbin Zhou 
/* Devices handled by this driver */
static const struct of_device_id ls2x_dma_of_match_table[] = {
	{ .compatible = "loongson,ls2k1000-apbdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);

static struct platform_driver ls2x_dmac_driver = {
	.probe		= ls2x_dma_probe,
	.remove		= ls2x_dma_remove,
	.driver = {
		.name	= "ls2x-apbdma",
		.of_match_table	= ls2x_dma_of_match_table,
	},
};
module_platform_driver(ls2x_dmac_driver);

MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");
687