// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac_icu {
	struct platform_device *pdev;
	u8 dmac_index;
};

struct rz_dmac {
	struct dma_device engine;
	struct rz_dmac_icu icu;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	bool has_icu;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)
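
/*
 * CHCFG bits: DMS appears to select link (descriptor) mode, SAD/DAD hold
 * the source/destination address fixed for device transfers, and REQD
 * selects the request direction. The CHCFG_FILL_* helpers relocate the
 * TM/AM/LVL/HIEN bits from the DT request cell into their CHCFG positions
 * (e.g. bit 5 of the cell lands in CHCFG bit 27 via CHCFG_FILL_TM()), and
 * the DDS/SDS fields carry the data sizes set by rz_dmac_config().
 */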

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/* RZ/V2H ICU related */
#define RZV2H_MAX_DMAC_INDEX		4

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

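	/*
	 * Link descriptors whose HEADER_LV bit is already clear are assumed
	 * consumed: recycle them and point NXLA at the new head so the
	 * channel resumes at the first still-valid entry.
	 */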
	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

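	/*
	 * Route the peripheral DMA request to this channel: RZ/V2H-class
	 * parts program the request number through the ICU, while the other
	 * parts latch the MID/RID pair into DMARS (two 16-bit fields per
	 * 32-bit register, see rz_dmac_set_dmars_register()).
	 */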
	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index, channel->mid_rid);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	}

	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

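/*
 * Note that the prep callbacks only record the request in a software
 * descriptor; the hardware link descriptors are built later, from
 * rz_dmac_xfer_desc(), once the transfer is actually issued.
 */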
static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	unsigned long flags;
	unsigned int i;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	vchan_get_all_descriptors(&channel->vc, &head);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

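/*
 * The LUT index above appears to double as the DDS/SDS field encoding:
 * 1 byte -> 0, 2 bytes -> 1, ..., 128 bytes -> 7 (so a 4-byte peripheral
 * register gives the value 2). rz_dmac_config() below places it in CHCFG
 * via CHCFG_FILL_DDS_MASK/CHCFG_FILL_SDS_MASK.
 */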
static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->dst_per_address = config->dst_addr;
	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	if (channel->dst_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
	}

	channel->src_per_address = config->src_addr;
	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	if (channel->src_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
	}

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder: descriptors are allocated in alloc_chan_resources and
	 * freed in free_chan_resources. The ld_* lists manage them so that no
	 * memory allocation/free happens during DMA read/write.
	 */
}

static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout\n");

	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */
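
/*
 * The hard handler only acknowledges the channel interrupt (or resets the
 * channel on error) and returns IRQ_WAKE_THREAD; completing the cookie and
 * kicking the next queued transfer is deferred to the threaded handler,
 * which runs under the virt-chan lock.
 */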
static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		goto done;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
done:
	return;
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
}

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      u8 index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[6];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static void rz_dmac_put_device(void *_dev)
{
	struct device *dev = _dev;

	put_device(dev);
}

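/*
 * The "renesas,icu" phandle is optional: when it is present (RZ/V2H-class
 * SoCs) DMA requests are routed through the ICU and has_icu is set; when it
 * is absent (e.g. RZ/G2L) the driver falls back to the DMARS registers
 * mapped from the second MMIO resource (ext_base).
 */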
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	uint32_t dmac_index;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;

	dmac->has_icu = true;

	dmac->icu.pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!dmac->icu.pdev) {
		dev_err(dev, "ICU device not found.\n");
		return -ENODEV;
	}

	ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
	if (ret)
		return ret;

	dmac_index = args.args[0];
	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
		dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
		return -EINVAL;
	}
	dmac->icu.dmac_index = dmac_index;

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return rz_dmac_parse_of_icu(dev, dmac);
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	int ret;
	int irq;
	u8 i;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	if (!dmac->has_icu) {
		dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(dmac->ext_base))
			return PTR_ERR(dmac->ext_base);
	}

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
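	/*
	 * DCTRL sits in each channel group's common register block, so the
	 * default (DCTRL_LVINT | DCTRL_PR, presumably level-sensitive
	 * interrupts and rotating channel priority) is written once per
	 * group below.
	 */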
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;
	engine->device_synchronize = rz_dmac_device_synchronize;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	reset_control_assert(dmac->rstc);
err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static void rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,r9a09g057-dmac", },
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");