/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels.  So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode, which in turn
 * prevents them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
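
/*
 * A minimal sketch (not taken from any real board file) of the platform
 * data a machine might register to use this driver.  The device name,
 * FIFO address and mux callbacks below are made-up placeholders; only
 * fields actually consumed by this driver are shown:
 *
 *	static struct pl08x_channel_data board_slave_channels[] = {
 *		{
 *			.bus_id = "uart0_tx",		// matched by pl08x_filter_id()
 *			.addr = 0x80120000,		// peripheral FIFO address (bogus example)
 *			.cctl = 0,			// base CCTL bits for this channel
 *			.periph_buses = PL08X_AHB1,
 *		},
 *	};
 *
 *	static struct pl08x_platform_data board_pl08x_data = {
 *		.slave_channels = board_slave_channels,
 *		.num_slave_channels = ARRAY_SIZE(board_slave_channels),
 *		.memcpy_channel = { .cctl = 0 },
 *		.get_signal = board_get_signal,	// mux a request signal, may be NULL
 *		.put_signal = board_put_signal,
 *		.lli_buses = PL08X_AHB1,
 *		.mem_buses = PL08X_AHB1,
 *	};
 */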
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask ORed into the LLI pointer, selecting the AHB port for LLI fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT	(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE	(1 << PL08X_BOUNDARY_SHIFT)

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
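
/*
 * Worked out: struct pl08x_lli is four u32s (16 bytes), so the 0x2000 byte
 * buffer above holds at most 512 LLIs for a single transfer descriptor.
 */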

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}


/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
				     struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
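
/*
 * For example, a CCTL word read back with a transfer size of 64 and a
 * source width of PL080_WIDTH_32BIT decodes to 64 * 4 = 256 bytes left.
 */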

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * An LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  This prefers the
 * destination bus if both are available; if one bus has a fixed
 * (non-incrementing) address, the other bus is chosen as master.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* bd->srcbus.buswidth == 1 */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}

/*
 * Fill in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

/*
 * Return number of bytes to fill to boundary, or len.
 * This calculation works for any value of addr.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
		(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
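
/*
 * For example, with the 1 KiB boundary a source address of 0x1ffc and 100
 * bytes remaining yields min(0x2000 - 0x1ffc, 100) = 4: the LLI is capped
 * at 4 bytes and the next LLI continues from the boundary.
 */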

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* We need to count this down to zero */
	bd.remainder = txd->len;

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
		 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
		 bd.srcbus.buswidth,
		 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
		 bd.dstbus.buswidth,
		 bd.remainder, max_bytes_per_lli);
	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
		 mbus == &bd.srcbus ? "src" : "dst",
		 sbus == &bd.srcbus ? "src" : "dst");

	if (txd->len < mbus->buswidth) {
		/* Less than a bus width available - send as single bytes */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/* Make one byte LLIs until master bus is aligned */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above.
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
							   target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
							   target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len	= min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len	= (lli_len/mbus->buswidth) *
					mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary.  The next
				 * LLI will be past the boundary.  However, we
				 * may be working to a boundary on the slave
				 * bus.  We need to ensure the master stays
				 * aligned, and that we are working in
				 * multiples of the bus widths.
				 */
				odd_bytes = lli_len % mbus->buswidth;
				lli_len -= odd_bytes;

			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * calculate the actual transfer size in
				 * relation to bus width and get a maximum
				 * remainder of the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len	= tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
							lli_len, cctl);
				total_bytes += lli_len;
			}


			if (odd_bytes) {
				/*
				 * Creep past the boundary, maintaining
				 * master alignment
				 */
				int j;
				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
						num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %zu)\n",
				__func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

	plchan->phychan_hold++;
	plchan->phychan = ch;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may cause problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};
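
/*
 * pl08x_burst() below scans this table from the top and takes the first
 * entry that does not exceed the requested maxburst, e.g. maxburst = 10
 * words selects PL080_BSIZE_8, and maxburst = 1 falls through to the
 * single-transfer encoding PL080_BSIZE_1.
 */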

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	if (plchan->runtime_direction == DMA_FROM_DEVICE) {
		plchan->src_addr = config->src_addr;
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses,
					 pl08x->mem_buses);
	} else {
		plchan->dst_addr = config->dst_addr;
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses,
					 plchan->cd->periph_buses);
	}

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACKed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager to do this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
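
/*
 * A rough sketch (not from a real client) of how a dmaengine user drives
 * the memcpy side of this driver; error handling is omitted and "src",
 * "dst", "len" and done_callback are placeholders supplied by the caller:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	tx->callback = done_callback;
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */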

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = plchan->dst_cctl;
		txd->src_addr = sgl->dma_address;
		txd->dst_addr = plchan->dst_addr;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = plchan->src_cctl;
		txd->src_addr = plchan->src_addr;
		txd->dst_addr = sgl->dma_address;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
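
/*
 * A sketch of how a peripheral driver might grab one of the named slave
 * channels through the filter above; "uart0_tx" is a made-up bus_id that
 * would have to match an entry in the platform data:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */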

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		plchan->lc = txd->tx.cookie;
	}

	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that freed-up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0;
	u32 val;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/* An error interrupt (on one or more channels) */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/* Clear only the terminal interrupts on channels we processed */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	u32 cctl = pl08x_cctl(chan->cd->cctl);

	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->src_addr = chan->cd->addr;
	chan->dst_addr = chan->cd->addr;
	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, aligned to an 8-byte (PL08X_ALIGN) boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280880,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);