1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Renesas RZ/G2L DMA Controller Driver
4 *
5 * Based on imx-dma.c
6 *
7 * Copyright (C) 2021 Renesas Electronics Corp.
8 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
9 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/dmaengine.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/irqchip/irq-renesas-rzv2h.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_dma.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/reset.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28
29 #include "../dmaengine.h"
30 #include "../virt-dma.h"
31
/* Kind of transfer a software descriptor describes. */
enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

/*
 * Hardware link-mode descriptor, fetched by the DMAC from coherent memory.
 * Field names mirror the corresponding channel registers.
 */
struct rz_lmdesc {
	u32 header;	/* HEADER_LV marks the descriptor as valid */
	u32 sa;		/* source address */
	u32 da;		/* destination address */
	u32 tb;		/* transfer byte count */
	u32 chcfg;	/* channel configuration for this transfer */
	u32 chitvl;	/* channel interval */
	u32 chext;	/* channel extension */
	u32 nxla;	/* bus address of the next descriptor in the ring */
};

/* Software descriptor wrapping a virt-dma descriptor. */
struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;	/* entry in ld_free/ld_queue/ld_active */
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)

/* Per-channel state. */
struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;		/* this channel's register window */
	void __iomem *ch_cmn_base;	/* window shared by the channel group */
	unsigned int index;
	struct rz_dmac_chan;		/* (see below) */
	struct rz_dmac_desc *desc;	/* descriptor currently being issued */
	int descs_allocated;

	dma_addr_t src_per_address;	/* peripheral source address */
	dma_addr_t dst_per_address;	/* peripheral destination address */

	u32 chcfg;			/* cached CHCFG value */
	u32 chctrl;			/* CHCTRL value used to start transfers */
	int mid_rid;			/* claimed request line, -EINVAL if none */

	/* Software descriptor lists, protected by vc.lock. */
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	/* Ring of hardware link-mode descriptors in coherent memory. */
	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;	/* oldest not-yet-recycled entry */
		struct rz_lmdesc *tail;	/* next entry to be filled */
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)

/* Link to the RZ/V2H ICU that routes DMA requests to this DMAC. */
struct rz_dmac_icu {
	struct platform_device *pdev;
	u8 dmac_index;
};

/* Controller-wide state. */
struct rz_dmac {
	struct dma_device engine;
	struct rz_dmac_icu icu;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;	/* main register block */
	void __iomem *ext_base;	/* DMARS block (only mapped when !has_icu) */

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	bool has_icu;	/* DMA requests are routed through the ICU */

	/* Bitmap of MID/RID request lines currently claimed by channels. */
	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d) container_of(d, struct rz_dmac, engine)
114
/*
 * -----------------------------------------------------------------------------
 * Registers
 */

/* Per-channel registers (offsets inside a channel's register window) */
#define CHSTAT 0x0024
#define CHCTRL 0x0028
#define CHCFG 0x002c
#define NXLA 0x0038

/* Register common to a channel group (offset inside the common window) */
#define DCTRL 0x0000

/* Channels 0-7 and 8-15 live in two separate register groups */
#define EACH_CHANNEL_OFFSET 0x0040
#define CHANNEL_0_7_OFFSET 0x0000
#define CHANNEL_0_7_COMMON_BASE 0x0300
#define CHANNEL_8_15_OFFSET 0x0400
#define CHANNEL_8_15_COMMON_BASE 0x0700

/* CHSTAT bits */
#define CHSTAT_ER BIT(4)	/* channel error */
#define CHSTAT_EN BIT(0)	/* channel enabled/running */

/* CHCTRL bits */
#define CHCTRL_CLRINTMSK BIT(17)
#define CHCTRL_CLRSUS BIT(9)
#define CHCTRL_CLRTC BIT(6)
#define CHCTRL_CLREND BIT(5)
#define CHCTRL_CLRRQ BIT(4)
#define CHCTRL_SWRST BIT(3)	/* software reset of the channel */
#define CHCTRL_STG BIT(2)	/* software trigger (used for memcpy) */
#define CHCTRL_CLREN BIT(1)
#define CHCTRL_SETEN BIT(0)
/* Clear all state/flags and software-reset the channel */
#define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
			CHCTRL_CLRTC | CHCTRL_CLREND | \
			CHCTRL_CLRRQ | CHCTRL_SWRST | \
			CHCTRL_CLREN)

/* CHCFG bits */
#define CHCFG_DMS BIT(31)
#define CHCFG_DEM BIT(24)	/* set on all but the last sg descriptor */
#define CHCFG_DAD BIT(21)	/* fixed (peripheral) destination address */
#define CHCFG_SAD BIT(20)	/* fixed (peripheral) source address */
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)	/* destination data size */
#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)	/* source data size */
/* Relocate the TM/AM/LVL/HIEN fields of the DT cell into CHCFG positions */
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5)

/* Layout of the single DT cell handed to the of_xlate filter */
#define MID_RID_MASK GENMASK(9, 0)
#define CHCFG_MASK GENMASK(15, 10)
#define CHCFG_DS_INVALID 0xFF	/* sentinel from rz_dmac_ds_to_val_mapping() */
/* DCTRL bits */
#define DCTRL_LVINT BIT(1)
#define DCTRL_PR BIT(0)
#define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV BIT(0)	/* descriptor is valid */

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64	/* entries in each channel's descriptor ring */

/* RZ/V2H ICU related */
#define RZV2H_MAX_DMAC_INDEX 4
180
181 /*
182 * -----------------------------------------------------------------------------
183 * Device access
184 */
185
/* Write @val to the controller-global register at @offset. */
static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}
191
/* Write @val to the extension (DMARS) register block at @offset. */
static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}
197
/* Read the extension (DMARS) register at @offset. */
static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}
202
rz_dmac_ch_writel(struct rz_dmac_chan * channel,unsigned int val,unsigned int offset,int which)203 static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
204 unsigned int offset, int which)
205 {
206 if (which)
207 writel(val, channel->ch_base + offset);
208 else
209 writel(val, channel->ch_cmn_base + offset);
210 }
211
rz_dmac_ch_readl(struct rz_dmac_chan * channel,unsigned int offset,int which)212 static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
213 unsigned int offset, int which)
214 {
215 if (which)
216 return readl(channel->ch_base + offset);
217 else
218 return readl(channel->ch_cmn_base + offset);
219 }
220
221 /*
222 * -----------------------------------------------------------------------------
223 * Initialization
224 */
225
rz_lmdesc_setup(struct rz_dmac_chan * channel,struct rz_lmdesc * lmdesc)226 static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
227 struct rz_lmdesc *lmdesc)
228 {
229 u32 nxla;
230
231 channel->lmdesc.base = lmdesc;
232 channel->lmdesc.head = lmdesc;
233 channel->lmdesc.tail = lmdesc;
234 nxla = channel->lmdesc.base_dma;
235 while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
236 lmdesc->header = 0;
237 nxla += sizeof(*lmdesc);
238 lmdesc->nxla = nxla;
239 lmdesc++;
240 }
241
242 lmdesc->header = 0;
243 lmdesc->nxla = channel->lmdesc.base_dma;
244 }
245
246 /*
247 * -----------------------------------------------------------------------------
248 * Descriptors preparation
249 */
250
rz_dmac_lmdesc_recycle(struct rz_dmac_chan * channel)251 static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
252 {
253 struct rz_lmdesc *lmdesc = channel->lmdesc.head;
254
255 while (!(lmdesc->header & HEADER_LV)) {
256 lmdesc->header = 0;
257 lmdesc++;
258 if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
259 lmdesc = channel->lmdesc.base;
260 }
261 channel->lmdesc.head = lmdesc;
262 }
263
/*
 * rz_dmac_enable_hw - point the channel at its current head descriptor and
 * start it.
 *
 * Recycles completed link-mode descriptors, then, if the channel is not
 * already running, programs NXLA/CHCFG and kicks the channel via CHCTRL.
 * Local interrupts are disabled so the register sequence is not interleaved
 * with interrupt handling on this CPU.
 */
static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	/* Bus address of the ring head, derived from its index. */
	nxla = channel->lmdesc.base_dma +
	       (sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		/* Channel idle: reset it, then program and start. */
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}
294
/*
 * rz_dmac_disable_hw - stop the channel.
 *
 * CHCTRL_DEFAULT clears the enable/request/end flags and software-resets
 * the channel.
 */
static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}
307
/*
 * Program the DMARS request-line value for channel @nr.  Each 32-bit DMARS
 * register holds two 16-bit fields (even channel in the low half, odd
 * channel in the high half), so a read-modify-write is needed.
 */
static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 reg_offset = (nr / 2) * 4;
	u32 field_shift = (nr % 2) * 16;
	u32 val;

	val = rz_dmac_ext_readl(dmac, reg_offset);
	val &= ~(0xffff << field_shift);
	val |= dmars << field_shift;
	rz_dmac_ext_writel(dmac, val, reg_offset);
}
320
/*
 * rz_dmac_prepare_desc_for_memcpy - fill one link-mode descriptor for the
 * memory-to-memory copy described by channel->desc.
 *
 * The request line is set to its default/none value (a memcpy needs no
 * peripheral request) and the channel is armed to start by software
 * trigger (CHCTRL_STG).
 */
static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;	/* mark valid last */

	/* No peripheral request for a plain memory copy. */
	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}
349
rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan * channel)350 static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
351 {
352 struct dma_chan *chan = &channel->vc.chan;
353 struct rz_dmac *dmac = to_rz_dmac(chan->device);
354 struct rz_dmac_desc *d = channel->desc;
355 struct scatterlist *sg, *sgl = d->sg;
356 struct rz_lmdesc *lmdesc;
357 unsigned int i, sg_len = d->sgcount;
358
359 channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;
360
361 if (d->direction == DMA_DEV_TO_MEM) {
362 channel->chcfg |= CHCFG_SAD;
363 channel->chcfg &= ~CHCFG_REQD;
364 } else {
365 channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
366 }
367
368 lmdesc = channel->lmdesc.tail;
369
370 for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
371 if (d->direction == DMA_DEV_TO_MEM) {
372 lmdesc->sa = channel->src_per_address;
373 lmdesc->da = sg_dma_address(sg);
374 } else {
375 lmdesc->sa = sg_dma_address(sg);
376 lmdesc->da = channel->dst_per_address;
377 }
378
379 lmdesc->tb = sg_dma_len(sg);
380 lmdesc->chitvl = 0;
381 lmdesc->chext = 0;
382 if (i == (sg_len - 1)) {
383 lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
384 lmdesc->header = HEADER_LV;
385 } else {
386 lmdesc->chcfg = channel->chcfg;
387 lmdesc->header = HEADER_LV;
388 }
389 if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
390 lmdesc = channel->lmdesc.base;
391 }
392
393 channel->lmdesc.tail = lmdesc;
394
395 if (dmac->has_icu) {
396 rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
397 channel->index, channel->mid_rid);
398 } else {
399 rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
400 }
401
402 channel->chctrl = CHCTRL_SETEN;
403 }
404
/*
 * rz_dmac_xfer_desc - program the hardware for the channel's current
 * software descriptor (chan->desc) and start the transfer.
 *
 * Returns 0 on success (including the case where virt-dma has no issued
 * descriptor pending), -EINVAL for an unknown descriptor type.
 */
static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	/* Nothing issued through virt-dma yet: nothing to do. */
	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}
433
434 /*
435 * -----------------------------------------------------------------------------
436 * DMA engine operations
437 */
438
rz_dmac_alloc_chan_resources(struct dma_chan * chan)439 static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
440 {
441 struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
442
443 while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
444 struct rz_dmac_desc *desc;
445
446 desc = kzalloc_obj(*desc);
447 if (!desc)
448 break;
449
450 list_add_tail(&desc->node, &channel->ld_free);
451 channel->descs_allocated++;
452 }
453
454 if (!channel->descs_allocated)
455 return -ENOMEM;
456
457 return channel->descs_allocated;
458 }
459
/*
 * rz_dmac_free_chan_resources - stop the channel and release everything
 * set up by rz_dmac_alloc_chan_resources() and the of_xlate filter.
 *
 * Invalidates the hardware descriptor ring, returns queued/active software
 * descriptors to the free list, releases the claimed MID/RID request line
 * and finally frees all preallocated descriptors.
 */
static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	/* Invalidate every hardware descriptor in the ring. */
	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	/* Release the request line so another channel may claim it. */
	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}
493
494 static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan * chan,dma_addr_t dest,dma_addr_t src,size_t len,unsigned long flags)495 rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
496 size_t len, unsigned long flags)
497 {
498 struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
499 struct rz_dmac *dmac = to_rz_dmac(chan->device);
500 struct rz_dmac_desc *desc;
501
502 dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
503 __func__, channel->index, &src, &dest, len);
504
505 if (list_empty(&channel->ld_free))
506 return NULL;
507
508 desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
509
510 desc->type = RZ_DMAC_DESC_MEMCPY;
511 desc->src = src;
512 desc->dest = dest;
513 desc->len = len;
514 desc->direction = DMA_MEM_TO_MEM;
515
516 list_move_tail(channel->ld_free.next, &channel->ld_queue);
517 return vchan_tx_prep(&channel->vc, &desc->vd, flags);
518 }
519
520 static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan * chan,struct scatterlist * sgl,unsigned int sg_len,enum dma_transfer_direction direction,unsigned long flags,void * context)521 rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
522 unsigned int sg_len,
523 enum dma_transfer_direction direction,
524 unsigned long flags, void *context)
525 {
526 struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
527 struct rz_dmac_desc *desc;
528 struct scatterlist *sg;
529 int dma_length = 0;
530 int i = 0;
531
532 if (list_empty(&channel->ld_free))
533 return NULL;
534
535 desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
536
537 for_each_sg(sgl, sg, sg_len, i) {
538 dma_length += sg_dma_len(sg);
539 }
540
541 desc->type = RZ_DMAC_DESC_SLAVE_SG;
542 desc->sg = sgl;
543 desc->sgcount = sg_len;
544 desc->len = dma_length;
545 desc->direction = direction;
546
547 if (direction == DMA_DEV_TO_MEM)
548 desc->src = channel->src_per_address;
549 else
550 desc->dest = channel->dst_per_address;
551
552 list_move_tail(channel->ld_free.next, &channel->ld_queue);
553 return vchan_tx_prep(&channel->vc, &desc->vd, flags);
554 }
555
/*
 * rz_dmac_terminate_all - stop the channel and discard all descriptors.
 *
 * The hardware ring is invalidated, queued/active software descriptors are
 * returned to the free list, and all virt-dma descriptors are collected and
 * freed outside the lock.
 */
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	unsigned long flags;
	unsigned int i;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);

	/* Invalidate every hardware descriptor in the ring. */
	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	vchan_get_all_descriptors(&channel->vc, &head);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	/* Free virt-dma descriptors outside the spinlock. */
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}
577
/*
 * rz_dmac_issue_pending - start the first queued descriptor, if any.
 *
 * Takes the head of ld_queue as the current descriptor and, once virt-dma
 * confirms there is issued work, programs and starts the hardware, moving
 * the descriptor onto ld_active.
 */
static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}
603
rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)604 static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
605 {
606 u8 i;
607 static const enum dma_slave_buswidth ds_lut[] = {
608 DMA_SLAVE_BUSWIDTH_1_BYTE,
609 DMA_SLAVE_BUSWIDTH_2_BYTES,
610 DMA_SLAVE_BUSWIDTH_4_BYTES,
611 DMA_SLAVE_BUSWIDTH_8_BYTES,
612 DMA_SLAVE_BUSWIDTH_16_BYTES,
613 DMA_SLAVE_BUSWIDTH_32_BYTES,
614 DMA_SLAVE_BUSWIDTH_64_BYTES,
615 DMA_SLAVE_BUSWIDTH_128_BYTES,
616 };
617
618 for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
619 if (ds_lut[i] == ds)
620 return i;
621 }
622
623 return CHCFG_DS_INVALID;
624 }
625
rz_dmac_config(struct dma_chan * chan,struct dma_slave_config * config)626 static int rz_dmac_config(struct dma_chan *chan,
627 struct dma_slave_config *config)
628 {
629 struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
630 u32 val;
631
632 channel->dst_per_address = config->dst_addr;
633 channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
634 if (channel->dst_per_address) {
635 val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
636 if (val == CHCFG_DS_INVALID)
637 return -EINVAL;
638
639 channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
640 }
641
642 channel->src_per_address = config->src_addr;
643 channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
644 if (channel->src_per_address) {
645 val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
646 if (val == CHCFG_DS_INVALID)
647 return -EINVAL;
648
649 channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
650 }
651
652 return 0;
653 }
654
static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder.
	 * Descriptors are allocated in alloc_chan_resources and only freed
	 * in free_chan_resources; in between they are recycled via the
	 * channel's lists, so no memory is allocated or freed during DMA
	 * read/write.
	 */
}
665
/*
 * rz_dmac_device_synchronize - wait for the channel to go idle, then
 * release its request line.
 *
 * Polls CHSTAT_EN every 100 us for up to 100 ms.  On timeout only a
 * warning is logged and the request line is released anyway.
 */
static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout");

	/* Return the request line to its default/none value. */
	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}
}
686
687 /*
688 * -----------------------------------------------------------------------------
689 * IRQ handling
690 */
691
rz_dmac_irq_handle_channel(struct rz_dmac_chan * channel)692 static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
693 {
694 struct dma_chan *chan = &channel->vc.chan;
695 struct rz_dmac *dmac = to_rz_dmac(chan->device);
696 u32 chstat, chctrl;
697
698 chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
699 if (chstat & CHSTAT_ER) {
700 dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
701 channel->index, chstat);
702 rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
703 goto done;
704 }
705
706 chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
707 rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
708 done:
709 return;
710 }
711
rz_dmac_irq_handler(int irq,void * dev_id)712 static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
713 {
714 struct rz_dmac_chan *channel = dev_id;
715
716 if (channel) {
717 rz_dmac_irq_handle_channel(channel);
718 return IRQ_WAKE_THREAD;
719 }
720 /* handle DMAERR irq */
721 return IRQ_HANDLED;
722 }
723
/*
 * Threaded half of the channel interrupt: complete the finished descriptor
 * and, if more work is queued, start the next transfer.
 */
static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	/* Complete the oldest active descriptor and recycle it. */
	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);

	/* Kick the next queued descriptor, if any. */
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}
752
753 /*
754 * -----------------------------------------------------------------------------
755 * OF xlate and channel filter
756 */
757
/*
 * Filter callback for __dma_request_channel(): decode the single DT cell
 * into the MID/RID request line and the TM/AM/LVL/HIEN configuration bits,
 * then claim the request line.  Fails if that line is already claimed.
 */
static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	/* Bits 15:10 of the cell carry the channel configuration. */
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	/* Atomically claim the request line; reject if already taken. */
	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}
772
/*
 * Translate a DT dma-spec (one cell) into a channel, using
 * rz_dmac_chan_filter() to pick a free channel and claim the request line.
 */
static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
}
788
789 /*
790 * -----------------------------------------------------------------------------
791 * Probe and remove
792 */
793
/*
 * rz_dmac_chan_probe - set up one DMA channel.
 *
 * Requests the per-channel interrupt, maps the channel's register windows
 * (channels 0-7 and 8-15 live in separate register groups), allocates the
 * coherent ring of link-mode descriptors and registers the channel with
 * virt-dma.
 *
 * The lmdesc ring is not devm-managed; it is freed explicitly on the probe
 * error path and in remove().
 *
 * Returns 0 on success or a negative error code.
 */
static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      u8 index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[6];	/* fits "ch15" for index <= 15 */
	char *irqname;
	int irq, ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;	/* no request line claimed yet */

	/* Request the channel interrupt. */
	scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0)
		return irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}
858
/* devm action callback: drop the reference taken on the ICU device. */
static void rz_dmac_put_device(void *_dev)
{
	struct device *dev = _dev;

	put_device(dev);
}
865
/*
 * rz_dmac_parse_of_icu - parse the optional "renesas,icu" phandle.
 *
 * When the property is absent (-ENOENT) the part has no ICU and 0 is
 * returned with has_icu left false.  Otherwise the ICU platform device is
 * looked up (reference released via a devm action) and the DMAC index
 * argument validated.
 */
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	uint32_t dmac_index;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
	if (ret == -ENOENT)
		return 0;	/* no ICU: not an error */
	if (ret)
		return ret;

	dmac->has_icu = true;

	dmac->icu.pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!dmac->icu.pdev) {
		dev_err(dev, "ICU device not found.\n");
		return -ENODEV;
	}

	/* Balance the reference of_find_device_by_node() took. */
	ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
	if (ret)
		return ret;

	dmac_index = args.args[0];
	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
		dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
		return -EINVAL;
	}
	dmac->icu.dmac_index = dmac_index;

	return 0;
}
901
/*
 * rz_dmac_parse_of - read the mandatory "dma-channels" property, validate
 * it against the supported channel count, then parse the optional ICU link.
 */
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	/* Reject zero channels or more than the IP supports. */
	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return rz_dmac_parse_of_icu(dev, dmac);
}
920
rz_dmac_probe(struct platform_device * pdev)921 static int rz_dmac_probe(struct platform_device *pdev)
922 {
923 const char *irqname = "error";
924 struct dma_device *engine;
925 struct rz_dmac *dmac;
926 int channel_num;
927 int ret;
928 int irq;
929 u8 i;
930
931 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
932 if (!dmac)
933 return -ENOMEM;
934
935 dmac->dev = &pdev->dev;
936 platform_set_drvdata(pdev, dmac);
937
938 ret = rz_dmac_parse_of(&pdev->dev, dmac);
939 if (ret < 0)
940 return ret;
941
942 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
943 sizeof(*dmac->channels), GFP_KERNEL);
944 if (!dmac->channels)
945 return -ENOMEM;
946
947 /* Request resources */
948 dmac->base = devm_platform_ioremap_resource(pdev, 0);
949 if (IS_ERR(dmac->base))
950 return PTR_ERR(dmac->base);
951
952 if (!dmac->has_icu) {
953 dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
954 if (IS_ERR(dmac->ext_base))
955 return PTR_ERR(dmac->ext_base);
956 }
957
958 /* Register interrupt handler for error */
959 irq = platform_get_irq_byname(pdev, irqname);
960 if (irq < 0)
961 return irq;
962
963 ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
964 irqname, NULL);
965 if (ret) {
966 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
967 irq, ret);
968 return ret;
969 }
970
971 /* Initialize the channels. */
972 INIT_LIST_HEAD(&dmac->engine.channels);
973
974 dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
975 if (IS_ERR(dmac->rstc))
976 return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
977 "failed to get resets\n");
978
979 pm_runtime_enable(&pdev->dev);
980 ret = pm_runtime_resume_and_get(&pdev->dev);
981 if (ret < 0) {
982 dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
983 goto err_pm_disable;
984 }
985
986 ret = reset_control_deassert(dmac->rstc);
987 if (ret)
988 goto err_pm_runtime_put;
989
990 for (i = 0; i < dmac->n_channels; i++) {
991 ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
992 if (ret < 0)
993 goto err;
994 }
995
996 /* Register the DMAC as a DMA provider for DT. */
997 ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
998 NULL);
999 if (ret < 0)
1000 goto err;
1001
1002 /* Register the DMA engine device. */
1003 engine = &dmac->engine;
1004 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1005 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1006 rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
1007 rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
1008
1009 engine->dev = &pdev->dev;
1010
1011 engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
1012 engine->device_free_chan_resources = rz_dmac_free_chan_resources;
1013 engine->device_tx_status = dma_cookie_status;
1014 engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
1015 engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
1016 engine->device_config = rz_dmac_config;
1017 engine->device_terminate_all = rz_dmac_terminate_all;
1018 engine->device_issue_pending = rz_dmac_issue_pending;
1019 engine->device_synchronize = rz_dmac_device_synchronize;
1020
1021 engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
1022 dma_set_max_seg_size(engine->dev, U32_MAX);
1023
1024 ret = dma_async_device_register(engine);
1025 if (ret < 0) {
1026 dev_err(&pdev->dev, "unable to register\n");
1027 goto dma_register_err;
1028 }
1029 return 0;
1030
1031 dma_register_err:
1032 of_dma_controller_free(pdev->dev.of_node);
1033 err:
1034 channel_num = i ? i - 1 : 0;
1035 for (i = 0; i < channel_num; i++) {
1036 struct rz_dmac_chan *channel = &dmac->channels[i];
1037
1038 dma_free_coherent(&pdev->dev,
1039 sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
1040 channel->lmdesc.base,
1041 channel->lmdesc.base_dma);
1042 }
1043
1044 reset_control_assert(dmac->rstc);
1045 err_pm_runtime_put:
1046 pm_runtime_put(&pdev->dev);
1047 err_pm_disable:
1048 pm_runtime_disable(&pdev->dev);
1049
1050 return ret;
1051 }
1052
/*
 * rz_dmac_remove - unregister the engine and OF provider, free each
 * channel's coherent descriptor ring, and put the controller back in reset.
 */
static void rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
1072
/* Device-tree match table. */
static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,r9a09g057-dmac", },
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");
1094