// SPDX-License-Identifier: GPL-2.0-only
/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/*
 * The SA11x0 DMA controller provides NR_PHY_CHAN physical channels.  Each
 * physical channel has two hardware buffers, A and B (DBSA/DBTA and
 * DBSB/DBTB), which are programmed and started independently; the DCSR_BIU
 * status bit reports which buffer the hardware is currently using.  One
 * virtual channel exists per peripheral request (per DDAR setting), and
 * virtual channels are multiplexed onto free physical channels by the
 * device tasklet.
 */
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3	/* low address bits that must be clear (word align) */
#define DMA_MAX_SIZE	0x1fff	/* maximum byte count per hardware buffer */
#define DMA_CHUNK_SIZE	0x1000

/* Per-channel register offsets (each channel occupies DMA_SIZE bytes) */
#define DMA_DDAR	0x00	/* device address and configuration */
#define DMA_DCSR_S	0x04	/* control/status: write 1 to set */
#define DMA_DCSR_C	0x08	/* control/status: write 1 to clear */
#define DMA_DCSR_R	0x0c	/* control/status: read */
#define DMA_DBSA	0x10	/* buffer A start address */
#define DMA_DBTA	0x14	/* buffer A transfer count */
#define DMA_DBSB	0x18	/* buffer B start address */
#define DMA_DBTB	0x1c	/* buffer B transfer count */
#define DMA_SIZE	0x20

/* DCSR bits */
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)	/* buffer in use: 0 = A, 1 = B */

/* DDAR bits: transfer attributes plus the peripheral request selector */
#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)

/* One hardware buffer's worth of work: bus address and byte count */
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

/* A prepared transaction: a DDAR setting plus a list of hardware buffers */
struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;	/* total byte count of the transfer */
	unsigned		period;	/* cyclic: sg entries per period */
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[] __counted_by(sglen);
};

struct sa11x0_dma_phy;

/* Virtual channel: one per peripheral request line */
struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;	/* entry on d->chan_pending when waiting for a pchan */

	u32			ddar;
	const char		*name;
};

/* Physical channel: a register window plus the in-flight descriptor state */
struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;	/* vchan currently bound to this pchan */

	/* Protected by c->vc.lock */
	unsigned		sg_load;	/* next sg index to program into hardware */
	struct sa11x0_dma_desc	*txd_load;	/* descriptor being loaded into buffers */
	unsigned		sg_done;	/* number of sg entries completed */
	struct sa11x0_dma_desc	*txd_done;	/* descriptor whose completions we're counting */
	u32			dbs[2];		/* saved buffer addresses across suspend */
	u32			dbt[2];		/* saved buffer counts across suspend */
	u32			dcsr;		/* saved control/status across suspend */
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;	/* vchans waiting for a free pchan */
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

/* Peek at the next issued descriptor on the vchan, or NULL if none */
static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

/* Make @txd the descriptor being loaded on pchan @p (caller holds vc.lock) */
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

/*
 * Load the next sg entry of the current descriptor into whichever hardware
 * buffer (A or B) is free, and start it.  At the end of a non-cyclic
 * descriptor, chain straight into the next issued descriptor if it uses the
 * same DDAR; cyclic descriptors wrap back to their first entry.
 * Called with c->vc.lock held.
 */
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/*
	 * Select buffer to load according to channel status: use buffer A
	 * when the hardware is busy in B, or when both buffers are idle and
	 * A is the next buffer the engine will use.
	 */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

/*
 * Account one completed hardware buffer.  When the whole descriptor has
 * completed, either signal cookie completion (non-cyclic) or fire the
 * cyclic callback at period boundaries, then try to keep the hardware fed.
 * Called from the IRQ handler with c->vc.lock held.
 */
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			/* Nothing left on this pchan: let the tasklet re-assign it */
			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

/*
 * Per-pchan interrupt: acknowledge error/done status, log errors, and
 * complete one buffer for each of DONEA and DONEB that is set.
 */
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Start the next issued descriptor on the vchan's pchan: program DDAR and
 * kick both hardware buffers.  Called with c->vc.lock held and c->phy set.
 */
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

/*
 * Scheduler tasklet: release physical channels whose vchan has run dry,
 * then hand free pchans to vchans waiting on d->chan_pending and start
 * their first descriptor.  Lock order is vc.lock outside d->lock.
 */
static void sa11x0_dma_tasklet(struct tasklet_struct *t)
{
	struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	/* Second pass: bind and start outside d->lock, under each vc.lock */
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}


static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

/*
 * Read the bus address the pchan is currently transferring from/to, by
 * picking the buffer (A or B) that the BIU/start bits indicate is active.
 */
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

/*
 * Report transfer status.  Residue is the descriptor's full size if it is
 * still on the issued list, otherwise the sum of untransferred bytes in
 * the in-flight descriptor, computed from the current hardware position.
 */
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			/* Find the sg entry containing the hardware position */
			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			/* All later sg entries are still entirely pending */
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * Prepare a slave scatter-gather transaction.  Each sg entry is split into
 * chunks no larger than DMA_MAX_SIZE (the hardware buffer count limit),
 * keeping chunk boundaries aligned to DMA_ALIGN.
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned int i, j;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);

		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	j = sg_nents_for_dma(sg, sglen, DMA_MAX_SIZE & ~DMA_ALIGN);
	txd = kzalloc_flex(*txd, sg, j, GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}
	txd->sglen = j;

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

/*
 * Prepare a cyclic transaction over a contiguous buffer.  Each period is
 * split into sgperiod chunks of at most DMA_MAX_SIZE bytes; the cyclic
 * callback fires once per period (see sa11x0_dma_complete()).
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc_flex(*txd, sg, sglen, GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}
	txd->sglen = sglen;

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

/*
 * Apply a slave configuration: validate width (8/16-bit) and burst (4/8),
 * then rebuild the channel's DDAR, packing the device address into the
 * DDAR layout while preserving the request-selector and direction bits.
 */
static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

/*
 * Pause the channel: clear RUN/IE if it holds a pchan, otherwise remove it
 * from the pending list so the tasklet won't start it.
 */
static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

/*
 * Resume a paused channel: set RUN/IE again if it holds a pchan, otherwise
 * requeue it on the pending list if it has issued descriptors.
 */
static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

/*
 * Terminate all activity on the channel: stop the hardware, collect every
 * descriptor (issued, in-flight load and done), release the pchan, and
 * free the descriptors outside the lock.
 */
static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			/* txd_load may be the same descriptor as txd_done */
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

/* Static description of each virtual channel: its DDAR value and name */
struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

/* dmaengine slave map: client device/channel name -> vchan name */
static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};

/* Filter function: match a vchan by its name (param is the name string) */
static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	const char *p = param;

	return !strcmp(c->name, p);
}

/*
 * Populate the dma_device callbacks, create one vchan per entry in
 * chan_desc[], and register the device with the dmaengine core.  On
 * allocation failure, already-created channels are left on the device
 * list for the caller to free via sa11x0_dma_free_channels().
 */
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc_obj(*c);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

/*
 * Request the interrupt for physical channel @nr; the SA11x0 has one IRQ
 * per physical DMA channel, taken from the platform device resources.
 */
static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

/* Release the interrupt for physical channel @nr, if one was obtained */
static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);
	if (irq > 0)
		free_irq(irq, data);
}

/* Tear down and free every vchan created by sa11x0_dma_init_dmadev() */
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

/*
 * Probe: map the controller registers, quiesce each physical channel,
 * request its IRQ, then advertise slave + cyclic capability and register
 * with the dmaengine core.  Error paths unwind in strict reverse order.
 */
static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc_obj(*d);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_setup(&d->task, sa11x0_dma_tasklet);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		/* Stop the channel and clear any stale status before use */
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			/* Release the IRQs obtained so far, newest first */
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

/* Remove: unregister from dmaengine, then undo everything probe did */
static void sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);
}

/*
 * Suspend: stop each running channel and snapshot its buffer registers.
 * The saved state is normalised so that the buffer the hardware was
 * working on (per DCSR_BIU) is recorded in slot 0 with its start bits
 * swapped accordingly, letting resume restore everything via buffer A
 * first.  Finally the start bits are cleared so the channels are idle.
 */
static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			/* Hardware was in buffer B: save B as slot 0, swap STRT bits */
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

/*
 * Resume: for each pchan with an in-flight descriptor, reprogram DDAR and
 * the saved buffer addresses/counts, then restore the saved control bits.
 * Channels are expected to be idle at this point (see the WARN_ON).
 */
static __maybe_unused int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume)
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

/* Register early (subsys_initcall) so client drivers find their channels */
static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");