// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Loongson-1 APB DMA Controller
 *
 * Copyright (C) 2015-2024 Keguang Zhang <keguang.zhang@gmail.com>
 */

#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Loongson-1 DMA Control Register */
#define LS1X_DMA_CTRL		0x0

/* DMA Control Register Bits */
#define LS1X_DMA_STOP		BIT(4)
#define LS1X_DMA_START		BIT(3)
#define LS1X_DMA_ASK_VALID	BIT(2)

/* DMA Next Field Bits */
#define LS1X_DMA_NEXT_VALID	BIT(0)

/* DMA Command Field Bits */
#define LS1X_DMA_RAM2DEV	BIT(12)
#define LS1X_DMA_INT		BIT(1)
#define LS1X_DMA_INT_MASK	BIT(0)

#define LS1X_DMA_LLI_ALIGNMENT	64
#define LS1X_DMA_LLI_ADDR_MASK	GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT))
#define LS1X_DMA_MAX_CHANNELS	3

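/*
 * Hardware descriptor ("LLI") layout: seven consecutive 32-bit words in
 * the order given by the enum below. The controller fetches descriptors
 * from memory itself, so struct ls1x_dma_lli must start with exactly this
 * layout and be 64-byte aligned: the control register only carries bits
 * 31:6 of a descriptor address (see LS1X_DMA_LLI_ADDR_MASK).
 */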
enum ls1x_dmadesc_offsets {
	LS1X_DMADESC_NEXT = 0,
	LS1X_DMADESC_SADDR,
	LS1X_DMADESC_DADDR,
	LS1X_DMADESC_LENGTH,
	LS1X_DMADESC_STRIDE,
	LS1X_DMADESC_CYCLES,
	LS1X_DMADESC_CMD,
	LS1X_DMADESC_SIZE
};

struct ls1x_dma_lli {
	unsigned int hw[LS1X_DMADESC_SIZE];
	dma_addr_t phys;
	struct list_head node;
} __aligned(LS1X_DMA_LLI_ALIGNMENT);

struct ls1x_dma_desc {
	struct virt_dma_desc vd;
	struct list_head lli_list;
};

struct ls1x_dma_chan {
	struct virt_dma_chan vc;
	struct dma_pool *lli_pool;
	phys_addr_t src_addr;
	phys_addr_t dst_addr;
	enum dma_slave_buswidth src_addr_width;
	enum dma_slave_buswidth dst_addr_width;
	unsigned int bus_width;
	void __iomem *reg_base;
	int irq;
	bool is_cyclic;
	struct ls1x_dma_lli *curr_lli;
};

struct ls1x_dma {
	struct dma_device ddev;
	unsigned int nr_chans;
	struct ls1x_dma_chan chan[];
};

static irqreturn_t ls1x_dma_irq_handler(int irq, void *data);

#define to_ls1x_dma_chan(dchan) \
	container_of(dchan, struct ls1x_dma_chan, vc.chan)

#define to_ls1x_dma_desc(d) \
	container_of(d, struct ls1x_dma_desc, vd)

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

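/*
 * Ask the controller for the channel's in-flight descriptor: judging by
 * how pause and tx_status use it, the ASK_VALID handshake has the
 * hardware copy the current LLI into the 64-byte-aligned buffer at
 * *lli_phys (the spare curr_lli buffer allocated per channel).
 */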
static inline int ls1x_dma_query(struct ls1x_dma_chan *chan,
				 dma_addr_t *lli_phys)
{
	struct dma_chan *dchan = &chan->vc.chan;
	int val, ret;

	val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
	val |= LS1X_DMA_ASK_VALID;
	val |= dchan->chan_id;
	writel(val, chan->reg_base + LS1X_DMA_CTRL);
	ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val,
					!(val & LS1X_DMA_ASK_VALID), 0, 3000);
	if (ret)
		dev_err(chan2dev(dchan), "failed to query DMA\n");

	return ret;
}

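/*
 * Point the channel at the descriptor chain starting at *lli_phys and
 * set the START bit; the poll below assumes the controller clears START
 * once it has latched the descriptor address.
 */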
static inline int ls1x_dma_start(struct ls1x_dma_chan *chan,
				 dma_addr_t *lli_phys)
{
	struct dma_chan *dchan = &chan->vc.chan;
	struct device *dev = chan2dev(dchan);
	int val, ret;

	val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
	val |= LS1X_DMA_START;
	val |= dchan->chan_id;
	writel(val, chan->reg_base + LS1X_DMA_CTRL);
	ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val,
				 !(val & LS1X_DMA_START), 0, 1000);
	if (!ret)
		dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys);
	else
		dev_err(dev, "failed to start DMA\n");

	return ret;
}

static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan)
{
	int val = readl(chan->reg_base + LS1X_DMA_CTRL);

	writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL);
}

static void ls1x_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct device *dev = chan2dev(dchan);

	dma_free_coherent(dev, sizeof(struct ls1x_dma_lli),
			  chan->curr_lli, chan->curr_lli->phys);
	dma_pool_destroy(chan->lli_pool);
	chan->lli_pool = NULL;
	devm_free_irq(dev, chan->irq, chan);
	vchan_free_chan_resources(&chan->vc);
}

static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct device *dev = chan2dev(dchan);
	dma_addr_t phys;
	int ret;

	ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler,
			       IRQF_SHARED, dma_chan_name(dchan), chan);
	if (ret) {
		dev_err(dev, "failed to request IRQ %d\n", chan->irq);
		return ret;
	}

	chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev,
					 sizeof(struct ls1x_dma_lli),
					 __alignof__(struct ls1x_dma_lli), 0);
	if (!chan->lli_pool)
		return -ENOMEM;

	/* allocate memory for querying the current lli */
	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli),
					    &phys, GFP_KERNEL);
	if (!chan->curr_lli) {
		dma_pool_destroy(chan->lli_pool);
		return -ENOMEM;
	}
	chan->curr_lli->phys = phys;

	return 0;
}

static void ls1x_dma_free_desc(struct virt_dma_desc *vd)
{
	struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan);
	struct ls1x_dma_lli *lli, *_lli;

	list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
		list_del(&lli->node);
		dma_pool_free(chan->lli_pool, lli, lli->phys);
	}

	kfree(desc);
}

static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void)
{
	struct ls1x_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->lli_list);

	return desc;
}

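/*
 * Build the hardware LLI chain for one transfer: one pool-allocated
 * descriptor per scatterlist entry, each linked to the next through its
 * NEXT word. For cyclic transfers the last descriptor links back to the
 * first, so the controller loops until it is explicitly stopped.
 */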
static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc,
			     struct scatterlist *sgl, unsigned int sg_len,
			     enum dma_transfer_direction dir, bool is_cyclic)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL;
	struct device *dev = chan2dev(dchan);
	struct list_head *pos = NULL;
	struct scatterlist *sg;
	unsigned int dev_addr, cmd, i;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dev_addr = chan->dst_addr;
		chan->bus_width = chan->dst_addr_width;
		cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
		break;
	case DMA_DEV_TO_MEM:
		dev_addr = chan->src_addr;
		chan->bus_width = chan->src_addr_width;
		cmd = LS1X_DMA_INT;
		break;
	default:
		dev_err(dev, "unsupported DMA direction: %s\n",
			dmaengine_get_direction_text(dir));
		return -EINVAL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf_addr = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);
		dma_addr_t phys;

		if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) {
			dev_err(dev, "buffer is not aligned\n");
			return -EINVAL;
		}

		/* allocate HW descriptors */
		lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys);
		if (!lli) {
			dev_err(dev, "failed to alloc lli %u\n", i);
			return -ENOMEM;
		}

		/* setup HW descriptors */
		lli->phys = phys;
		lli->hw[LS1X_DMADESC_SADDR] = buf_addr;
		lli->hw[LS1X_DMADESC_DADDR] = dev_addr;
		lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width;
		lli->hw[LS1X_DMADESC_STRIDE] = 0;
		lli->hw[LS1X_DMADESC_CYCLES] = 1;
		lli->hw[LS1X_DMADESC_CMD] = cmd;

		if (prev)
			prev->hw[LS1X_DMADESC_NEXT] =
				lli->phys | LS1X_DMA_NEXT_VALID;
		prev = lli;

		if (!first)
			first = lli;

		list_add_tail(&lli->node, &desc->lli_list);
	}

	if (is_cyclic) {
		lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID;
		chan->is_cyclic = is_cyclic;
	}

	list_for_each(pos, &desc->lli_list) {
		lli = list_entry(pos, struct ls1x_dma_lli, node);
		print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4,
				     lli, sizeof(*lli), false);
	}

	return 0;
}

static struct dma_async_tx_descriptor *
ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct ls1x_dma_desc *desc;

	dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n",
		sg_len, flags, dmaengine_get_direction_text(dir));

	desc = ls1x_dma_alloc_desc();
	if (!desc)
		return NULL;

	if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) {
		ls1x_dma_free_desc(&desc->vd);
		return NULL;
	}

	return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
}

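/*
 * The controller needs no dedicated cyclic setup beyond the NEXT-pointer
 * loop, so a cyclic transfer is prepared by synthesizing a temporary
 * scatterlist (one entry per period) and reusing the slave-sg LLI
 * builder with is_cyclic set.
 */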
static struct dma_async_tx_descriptor *
ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction dir, unsigned long flags)
{
	struct ls1x_dma_desc *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;
	int ret;

	dev_dbg(chan2dev(dchan),
		"buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n",
		buf_len, period_len, flags, dmaengine_get_direction_text(dir));

	desc = ls1x_dma_alloc_desc();
	if (!desc)
		return NULL;

	/* allocate the scatterlist */
	sg_len = buf_len / period_len;
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl) {
		/* don't leak the descriptor allocated above */
		ls1x_dma_free_desc(&desc->vd);
		return NULL;
	}

	sg_init_table(sgl, sg_len);
	for (i = 0; i < sg_len; ++i) {
		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)),
			    period_len, offset_in_page(buf_addr));
		sg_dma_address(&sgl[i]) = buf_addr;
		sg_dma_len(&sgl[i]) = period_len;
		buf_addr += period_len;
	}

	ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true);
	kfree(sgl);
	if (ret) {
		ls1x_dma_free_desc(&desc->vd);
		return NULL;
	}

	return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
}

static int ls1x_dma_slave_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

	chan->src_addr = config->src_addr;
	chan->src_addr_width = config->src_addr_width;
	chan->dst_addr = config->dst_addr;
	chan->dst_addr_width = config->dst_addr_width;

	return 0;
}

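/*
 * Pause snapshots the in-flight descriptor into curr_lli before setting
 * the STOP bit; resume then restarts the channel from that saved
 * descriptor rather than from the head of the chain.
 */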
static int ls1x_dma_pause(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	int ret;

	guard(spinlock_irqsave)(&chan->vc.lock);
	/* save the current lli */
	ret = ls1x_dma_query(chan, &chan->curr_lli->phys);
	if (!ret)
		ls1x_dma_stop(chan);

	return ret;
}

static int ls1x_dma_resume(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

	guard(spinlock_irqsave)(&chan->vc.lock);

	return ls1x_dma_start(chan, &chan->curr_lli->phys);
}

static int ls1x_dma_terminate_all(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct virt_dma_desc *vd;
	LIST_HEAD(head);

	ls1x_dma_stop(chan);

	scoped_guard(spinlock_irqsave, &chan->vc.lock) {
		vd = vchan_next_desc(&chan->vc);
		if (vd)
			vchan_terminate_vdesc(vd);

		vchan_get_all_descriptors(&chan->vc, &head);
	}

	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

static void ls1x_dma_synchronize(struct dma_chan *dchan)
{
	vchan_synchronize(to_virt_chan(dchan));
}

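/*
 * Residue is reported with segment granularity: query the in-flight
 * descriptor, locate it in the descriptor's LLI list by its NEXT word,
 * then sum the LENGTH of that and every following descriptor, scaled by
 * the bus width (LENGTH is in bus-width units, not bytes).
 */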
static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *state)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
	struct virt_dma_desc *vd;
	enum dma_status status;
	size_t bytes = 0;

	status = dma_cookie_status(dchan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	scoped_guard(spinlock_irqsave, &chan->vc.lock) {
		vd = vchan_find_desc(&chan->vc, cookie);
		if (vd) {
			struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
			struct ls1x_dma_lli *lli;
			dma_addr_t next_phys;

			/* get the current lli */
			if (ls1x_dma_query(chan, &chan->curr_lli->phys))
				return status;

			/* locate the current lli */
			next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT];
			list_for_each_entry(lli, &desc->lli_list, node)
				if (lli->hw[LS1X_DMADESC_NEXT] == next_phys)
					break;

			dev_dbg(chan2dev(dchan), "current lli_phys=%pad",
				&lli->phys);

			/* count the residues */
			list_for_each_entry_from(lli, &desc->lli_list, node)
				bytes += lli->hw[LS1X_DMADESC_LENGTH] *
					 chan->bus_width;
		}
	}

	dma_set_residue(state, bytes);

	return status;
}

static void ls1x_dma_issue_pending(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

	guard(spinlock_irqsave)(&chan->vc.lock);

	if (vchan_issue_pending(&chan->vc)) {
		struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

		if (vd) {
			struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
			struct ls1x_dma_lli *lli;

			lli = list_first_entry(&desc->lli_list,
					       struct ls1x_dma_lli, node);
			ls1x_dma_start(chan, &lli->phys);
		}
	}
}

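/*
 * Each descriptor is built with LS1X_DMA_INT set, so an interrupt fires
 * per completed descriptor. Cyclic transfers get the period callback and
 * keep running; everything else is removed from the issued list and has
 * its cookie completed.
 */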
static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
{
	struct ls1x_dma_chan *chan = data;
	struct dma_chan *dchan = &chan->vc.chan;
	struct device *dev = chan2dev(dchan);
	struct virt_dma_desc *vd;

	scoped_guard(spinlock, &chan->vc.lock) {
		vd = vchan_next_desc(&chan->vc);
		if (!vd) {
			dev_warn(dev,
				 "IRQ %d with no active desc on channel %d\n",
				 irq, dchan->chan_id);
			return IRQ_NONE;
		}

		if (chan->is_cyclic) {
			vchan_cyclic_callback(vd);
		} else {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
	}

	dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id);

	return IRQ_HANDLED;
}

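/*
 * All channels share a single control register block (MMIO resource 0);
 * a channel is distinguished by the chan_id written into CTRL and by its
 * own named IRQ ("ch0", "ch1", ...).
 */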
static int ls1x_dma_chan_probe(struct platform_device *pdev,
			       struct ls1x_dma *dma)
{
	void __iomem *reg_base;
	int id;

	reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	for (id = 0; id < dma->nr_chans; id++) {
		struct ls1x_dma_chan *chan = &dma->chan[id];
		char pdev_irqname[16];

		snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id);
		chan->irq = platform_get_irq_byname(pdev, pdev_irqname);
		if (chan->irq < 0)
			return dev_err_probe(&pdev->dev, chan->irq,
					     "failed to get IRQ for ch%d\n",
					     id);

		chan->reg_base = reg_base;
		chan->vc.desc_free = ls1x_dma_free_desc;
		vchan_init(&chan->vc, &dma->ddev);
	}

	return 0;
}

static void ls1x_dma_chan_remove(struct ls1x_dma *dma)
{
	int id;

	for (id = 0; id < dma->nr_chans; id++) {
		struct ls1x_dma_chan *chan = &dma->chan[id];

		if (chan->vc.chan.device == &dma->ddev) {
			list_del(&chan->vc.chan.device_node);
			tasklet_kill(&chan->vc.task);
		}
	}
}

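/*
 * The channel count is not given directly; it is inferred from the
 * number of per-channel IRQs the platform device carries, bounded by
 * LS1X_DMA_MAX_CHANNELS.
 */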
static int ls1x_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	struct ls1x_dma *dma;
	int ret;

	ret = platform_irq_count(pdev);
	if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS)
		return dev_err_probe(dev, -EINVAL,
				     "Invalid number of IRQ channels: %d\n",
				     ret);

	dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;
	dma->nr_chans = ret;

	/* initialize DMA device */
	ddev = &dma->ddev;
	ddev->dev = dev;
	ddev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = ls1x_dma_free_chan_resources;
	ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic;
	ddev->device_config = ls1x_dma_slave_config;
	ddev->device_pause = ls1x_dma_pause;
	ddev->device_resume = ls1x_dma_resume;
	ddev->device_terminate_all = ls1x_dma_terminate_all;
	ddev->device_synchronize = ls1x_dma_synchronize;
	ddev->device_tx_status = ls1x_dma_tx_status;
	ddev->device_issue_pending = ls1x_dma_issue_pending;
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	INIT_LIST_HEAD(&ddev->channels);

	/* initialize DMA channels */
	ret = ls1x_dma_chan_probe(pdev, dma);
	if (ret)
		goto err;

	ret = dmaenginem_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "failed to register DMA device\n");
		goto err;
	}

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
					 ddev);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto err;
	}

	platform_set_drvdata(pdev, dma);
	dev_info(dev, "Loongson1 DMA driver registered\n");

	return 0;

err:
	ls1x_dma_chan_remove(dma);

	return ret;
}

static void ls1x_dma_remove(struct platform_device *pdev)
{
	struct ls1x_dma *dma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	ls1x_dma_chan_remove(dma);
}

static const struct of_device_id ls1x_dma_match[] = {
	{ .compatible = "loongson,ls1b-apbdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_dma_match);

static struct platform_driver ls1x_dma_driver = {
	.probe = ls1x_dma_probe,
	.remove = ls1x_dma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = ls1x_dma_match,
	},
};

module_platform_driver(ls1x_dma_driver);

MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver");
MODULE_LICENSE("GPL");