// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqchip/irq-renesas-rzt2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac_icu {
	struct platform_device *pdev;
	u8 dmac_index;
};

struct rz_dmac_info {
	void (*icu_register_dma_req)(struct platform_device *icu_dev,
				     u8 dmac_index, u8 dmac_channel, u16 req_no);
	u16 default_dma_req_no;
};

struct rz_dmac {
	struct dma_device engine;
	struct rz_dmac_icu icu;
	const struct rz_dmac_info *info;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CRTB			0x0020
#define CHSTAT			0x0024
#define CHCTRL			0x0028
#define CHCFG			0x002c
#define NXLA			0x0038
#define CRLA			0x003c

#define DCTRL			0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_SUS			BIT(3)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_SETSUS			BIT(8)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/* RZ/V2H ICU related */
#define RZV2H_MAX_DMAC_INDEX		4

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

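/*
 * Channel registers live in two blocks: one private to each channel and one
 * shared by a group of channels. The 'which' argument selects between them:
 * non-zero accesses the per-channel block, zero the common block.
 */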
static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

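/*
 * Link all link-mode descriptors of a channel into a ring: each descriptor's
 * NXLA field holds the DMA address of the next one, and the last descriptor
 * points back at the first. head and tail then track which entries are free
 * and which are queued to the hardware.
 */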
static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

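/*
 * Advance the ring head past every descriptor whose LV (link valid) bit is
 * no longer set (entries the hardware has presumably finished with), clearing
 * their headers so they can be reused for new transfers.
 */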
static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	rz_dmac_lmdesc_recycle(channel);

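	/*
	 * Start from the current ring head: NXLA must point at the first
	 * still-valid descriptor, not at the base of the ring.
	 */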
	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
}

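/*
 * Each 32-bit DMARS register holds the request settings for two channels,
 * 16 bits each, hence the register offset is derived from the channel pair
 * and the shift from the channel parity.
 */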
static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

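/*
 * On SoCs where DMA requests are routed through the ICU (e.g. RZ/V2H and
 * RZ/T2H), the request number is registered with the ICU driver; everywhere
 * else it is programmed directly into the DMAC's own DMARS registers.
 */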
static void rz_dmac_set_dma_req_no(struct rz_dmac *dmac, unsigned int index,
				   int req_no)
{
	if (dmac->info->icu_register_dma_req)
		dmac->info->icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
						 index, req_no);
	else
		rz_dmac_set_dmars_register(dmac, index, req_no);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

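	/*
	 * Build one link-mode descriptor per scatterlist entry. Only the
	 * last descriptor clears CHCFG_DEM, so the end interrupt stays
	 * masked until the whole chain has completed.
	 */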
	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);

	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		/* No need to lock. This is called only for the 1st client. */
		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	rz_lmdesc_setup(channel, channel->lmdesc.base);

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
		if (list_empty(&channel->ld_free))
			return NULL;

		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

		desc->type = RZ_DMAC_DESC_MEMCPY;
		desc->src = src;
		desc->dest = dest;
		desc->len = len;
		desc->direction = DMA_MEM_TO_MEM;

		list_move_tail(channel->ld_free.next, &channel->ld_queue);
	}

	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
		if (list_empty(&channel->ld_free))
			return NULL;

		desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

		for_each_sg(sgl, sg, sg_len, i)
			dma_length += sg_dma_len(sg);

		desc->type = RZ_DMAC_DESC_SLAVE_SG;
		desc->sg = sgl;
		desc->sgcount = sg_len;
		desc->len = dma_length;
		desc->direction = direction;

		if (direction == DMA_DEV_TO_MEM)
			desc->src = channel->src_per_address;
		else
			desc->dest = channel->dst_per_address;

		list_move_tail(channel->ld_free.next, &channel->ld_queue);
	}

	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&channel->vc.lock, flags);
	rz_dmac_disable_hw(channel);
	rz_lmdesc_setup(channel, channel->lmdesc.base);

	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	vchan_get_all_descriptors(&channel->vc, &head);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

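/*
 * The LUT index doubles as the hardware DDS/SDS encoding: the supported bus
 * widths are successive powers of two from 1 to 128 bytes, so the value
 * returned is log2 of the width in bytes.
 */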
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->dst_per_address = config->dst_addr;
	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	if (channel->dst_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
	}

	channel->src_per_address = config->src_addr;
	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	if (channel->src_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
	}

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder.
	 * Descriptor allocation is done during alloc_chan_resources and
	 * descriptors get freed during free_chan_resources.
	 * The lists are used to manage the descriptors and avoid any memory
	 * allocation/free during DMA read/write.
	 */
}

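/*
 * Wait for the channel to finish any in-flight transfer (CHSTAT_EN clear,
 * polled for up to 100 ms), then hand the request line back to its default
 * number.
 */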
static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout");

	rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);
}

static struct rz_lmdesc *
rz_dmac_get_next_lmdesc(struct rz_lmdesc *base, struct rz_lmdesc *lmdesc)
{
	struct rz_lmdesc *next = ++lmdesc;

	if (next >= base + DMAC_NR_LMDESC)
		next = base;

	return next;
}

static u32 rz_dmac_calculate_residue_bytes_in_vd(struct rz_dmac_chan *channel, u32 crla)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 residue = 0, i = 0;

	while (lmdesc->nxla != crla) {
		lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
		if (++i >= DMAC_NR_LMDESC)
			return 0;
	}

	/* Calculate residue from next lmdesc to end of virtual desc */
	while (lmdesc->chcfg & CHCFG_DEM) {
		residue += lmdesc->tb;
		lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
	}

	dev_dbg(dmac->dev, "%s: VD residue is %u\n", __func__, residue);

	return residue;
}

static u32 rz_dmac_chan_get_residue(struct rz_dmac_chan *channel,
				    dma_cookie_t cookie)
{
	struct rz_dmac_desc *current_desc, *desc;
	enum dma_status status;
	u32 crla, crtb, i;

	/* Get the virtual descriptor currently being processed */
	current_desc = list_first_entry_or_null(&channel->ld_active,
						struct rz_dmac_desc, node);
	if (!current_desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
	 */
	status = dma_cookie_status(&channel->vc.chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently processing virtual
	 * descriptor then the descriptor hasn't been processed yet, and the
	 * residue is equal to the full descriptor size. Also, a client driver
	 * may call this function before rz_dmac_irq_handler_thread() runs. In
	 * this case, the running descriptor will be the next descriptor, and
	 * will appear in the done list. So, if the argument cookie matches the
	 * done list's cookie, we can assume the residue is zero.
	 */
	if (cookie != current_desc->vd.tx.cookie) {
		list_for_each_entry(desc, &channel->ld_free, node) {
			if (cookie == desc->vd.tx.cookie)
				return 0;
		}

		list_for_each_entry(desc, &channel->ld_queue, node) {
			if (cookie == desc->vd.tx.cookie)
				return desc->len;
		}

		list_for_each_entry(desc, &channel->ld_active, node) {
			if (cookie == desc->vd.tx.cookie)
				return desc->len;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * We need to read two registers. Make sure the hardware does not move
	 * to the next lmdesc while reading the current lmdesc. Trying it 3
	 * times should be enough: initial read, retry, retry for the paranoid.
	 */
	for (i = 0; i < 3; i++) {
		crla = rz_dmac_ch_readl(channel, CRLA, 1);
		crtb = rz_dmac_ch_readl(channel, CRTB, 1);
		/* Still the same? */
		if (crla == rz_dmac_ch_readl(channel, CRLA, 1))
			break;
	}

	WARN_ONCE(i >= 3, "residue might not be continuous!");

	/*
	 * Calculate the number of bytes transferred in the processing virtual
	 * descriptor. One virtual descriptor can have many lmdesc.
	 */
	return crtb + rz_dmac_calculate_residue_bytes_in_vd(channel, crla);
}

static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	enum dma_status status;
	u32 residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	scoped_guard(spinlock_irqsave, &channel->vc.lock) {
		u32 val;

		residue = rz_dmac_chan_get_residue(channel, cookie);

		val = rz_dmac_ch_readl(channel, CHSTAT, 1);
		if (val & CHSTAT_SUS)
			status = DMA_PAUSED;
	}

	/* If there's no residue and the channel is not paused, the cookie is complete */
	if (!residue && status != DMA_PAUSED)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static int rz_dmac_device_pause(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	guard(spinlock_irqsave)(&channel->vc.lock);

	val = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(val & CHSTAT_EN))
		return 0;

	rz_dmac_ch_writel(channel, CHCTRL_SETSUS, CHCTRL, 1);
	return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
					(val & CHSTAT_SUS), 1, 1024,
					false, channel, CHSTAT, 1);
}

static int rz_dmac_device_resume(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	guard(spinlock_irqsave)(&channel->vc.lock);

	/* Do not check CHSTAT_SUS but rely on HW capabilities. */

	rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
	return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
					!(val & CHSTAT_SUS), 1, 1024,
					false, channel, CHSTAT, 1);
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);

		scoped_guard(spinlock_irqsave, &channel->vc.lock)
			rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		return;
	}

	/*
	 * No need to lock. This just clears the END interrupt. Writing
	 * zeros to CHCTRL is just ignored by HW.
	 */
	rz_dmac_ch_writel(channel, CHCTRL_CLREND, CHCTRL, 1);
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

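	/*
	 * The single DT cell packs the MID/RID value in bits [9:0] and the
	 * TM/AM/LVL/HIEN CHCFG fields in bits [15:10]; unpack both here.
	 */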
	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
}

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      u8 index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[6];
	char *irqname;
	int irq, ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0)
		return irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		return ret;
	}

	/* Set the I/O base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize the registers for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static void rz_dmac_put_device(void *_dev)
{
	struct device *dev = _dev;

	put_device(dev);
}

static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	u32 dmac_index;
	int ret;

	if (!dmac->info->icu_register_dma_req)
		return 0;

	ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
	if (ret)
		return ret;

	dmac->icu.pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!dmac->icu.pdev) {
		dev_err(dev, "ICU device not found.\n");
		return -ENODEV;
	}

	ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
	if (ret)
		return ret;

	dmac_index = args.args[0];
	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
		dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
		return -EINVAL;
	}
	dmac->icu.dmac_index = dmac_index;

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return rz_dmac_parse_of_icu(dev, dmac);
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	int ret;
	int irq;
	u8 i;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->info = device_get_match_data(&pdev->dev);
	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	if (!dmac->info->icu_register_dma_req) {
		dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(dmac->ext_base))
			return PTR_ERR(dmac->ext_base);
	}

	/* Register the interrupt handler for errors */
	irq = platform_get_irq_byname_optional(pdev, irqname);
	if (irq > 0) {
		ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
				       irqname, NULL);
		if (ret) {
			dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
				irq, ret);
			return ret;
		}
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = rz_dmac_tx_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;
	engine->device_synchronize = rz_dmac_device_synchronize;
	engine->device_pause = rz_dmac_device_pause;
	engine->device_resume = rz_dmac_device_resume;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	/* Channels 0..i-1 were fully initialized; free their lmdesc rings. */
	channel_num = i;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	reset_control_assert(dmac->rstc);
err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static void rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static const struct rz_dmac_info rz_dmac_v2h_info = {
	.icu_register_dma_req = rzv2h_icu_register_dma_req,
	.default_dma_req_no = RZV2H_ICU_DMAC_REQ_NO_DEFAULT,
};

static const struct rz_dmac_info rz_dmac_t2h_info = {
	.icu_register_dma_req = rzt2h_icu_register_dma_req,
	.default_dma_req_no = RZT2H_ICU_DMAC_REQ_NO_DEFAULT,
};

static const struct rz_dmac_info rz_dmac_generic_info = {
	.default_dma_req_no = 0,
};

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,r9a09g057-dmac", .data = &rz_dmac_v2h_info },
	{ .compatible = "renesas,r9a09g077-dmac", .data = &rz_dmac_t2h_info },
	{ .compatible = "renesas,rz-dmac", .data = &rz_dmac_generic_info },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");