// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac_icu {
	struct platform_device *pdev;
	u8 dmac_index;
};

struct rz_dmac {
	struct dma_device engine;
	struct rz_dmac_icu icu;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	bool has_icu;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS_MASK		GENMASK(19, 16)
#define CHCFG_FILL_SDS_MASK		GENMASK(15, 12)
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/* RZ/V2H ICU related */
#define RZV2H_MAX_DMAC_INDEX		4

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	/* Each 32-bit DMARS register holds the settings for two channels. */
	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}

static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index, channel->mid_rid);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	}

	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	vchan_get_all_descriptors(&channel->vc, &head);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->dst_per_address = config->dst_addr;
	channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
	if (channel->dst_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
	}

	channel->src_per_address = config->src_addr;
	channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
	if (channel->src_per_address) {
		val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
	}

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder.
	 * Descriptors are allocated during alloc_chan_resources and
	 * freed during free_chan_resources.
	 * A list is used to manage the descriptors and avoid any memory
	 * allocation/free during DMA read/write.
	 */
}

static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat;
	int ret;

	ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
				100, 100000, false, channel, CHSTAT, 1);
	if (ret < 0)
		dev_warn(dmac->dev, "DMA Timeout");

	if (dmac->has_icu) {
		rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
					   channel->index,
					   RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
	} else {
		rz_dmac_set_dmars_register(dmac, channel->index, 0);
	}
}

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		goto done;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
done:
	return;
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	/*
	 * args[0]: bits [9:0] hold the MID/RID, bits [15:10] carry the
	 * CHCFG HIEN/LVL/AM/TM settings filled in below.
	 */
	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
}

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      u8 index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[6];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	uint32_t dmac_index;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;

	dmac->has_icu = true;

	dmac->icu.pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!dmac->icu.pdev) {
		dev_err(dev, "ICU device not found.\n");
		return -ENODEV;
	}

	dmac_index = args.args[0];
	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
		dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
		return -EINVAL;
	}
	dmac->icu.dmac_index = dmac_index;

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return rz_dmac_parse_of_icu(dev, dmac);
}

static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	int ret;
	int irq;
	u8 i;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	if (!dmac->has_icu) {
		dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(dmac->ext_base))
			return PTR_ERR(dmac->ext_base);
	}

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;
	engine->device_synchronize = rz_dmac_device_synchronize;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	reset_control_assert(dmac->rstc);
err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static void rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	dma_async_device_unregister(&dmac->engine);
	of_dma_controller_free(pdev->dev.of_node);
	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_device_put(dmac->icu.pdev);
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,r9a09g057-dmac", },
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");