1 /* 2 * Copyright (c) 2006 ARM Ltd. 3 * Copyright (c) 2010 ST-Ericsson SA 4 * 5 * Author: Peter Pearse <peter.pearse@arm.com> 6 * Author: Linus Walleij <linus.walleij@stericsson.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the Free 10 * Software Foundation; either version 2 of the License, or (at your option) 11 * any later version. 12 * 13 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * more details. 17 * 18 * You should have received a copy of the GNU General Public License along with 19 * this program; if not, write to the Free Software Foundation, Inc., 59 20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 * 22 * The full GNU General Public License is in this distribution in the file 23 * called COPYING. 24 * 25 * Documentation: ARM DDI 0196G == PL080 26 * Documentation: ARM DDI 0218E == PL081 27 * 28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 29 * channel. 30 * 31 * The PL080 has 8 channels available for simultaneous use, and the PL081 32 * has only two channels. So on these DMA controllers the number of channels 33 * and the number of incoming DMA signals are two totally different things. 34 * It is usually not possible to handle all physical signals simultaneously, 35 * so a multiplexing scheme with possible denial of use is necessary. 36 * 37 * The PL080 has a dual bus master, the PL081 has a single master. 38 * 39 * Memory to peripheral transfer may be visualized as 40 * Get data from memory to DMAC 41 * Until no data left 42 * On burst request from peripheral 43 * Destination burst from DMAC to peripheral 44 * Clear burst request 45 * Raise terminal count interrupt 46 * 47 * For peripherals with a FIFO: 48 * Source burst size == half the depth of the peripheral FIFO 49 * Destination burst size == the depth of the peripheral FIFO 50 * 51 * (Bursts are irrelevant for mem to mem transfers - there are no burst 52 * signals, the DMA controller will simply facilitate its AHB master.) 53 * 54 * ASSUMES default (little) endianness for DMA transfers 55 * 56 * The PL08x has two flow control settings: 57 * - DMAC flow control: the transfer size defines the number of transfers 58 * which occur for the current LLI entry, and the DMAC raises TC at the 59 * end of every LLI entry. Observed behaviour shows the DMAC listening 60 * to both the BREQ and SREQ signals (contrary to the documentation), 61 * transferring data if either is active. The LBREQ and LSREQ signals 62 * are ignored. 63 * 64 * - Peripheral flow control: the transfer size is ignored (and should be 65 * zero). The data is transferred from the current LLI entry, until 66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 67 * will then move to the next LLI entry. 68 * 69 * Only the former works sanely with scatter lists, so we only implement 70 * the DMAC flow control method. However, peripherals which use the LBREQ 71 * and LSREQ signals (e.g. MMCI) are unable to use this mode, and so 72 * these hardware restrictions prevent them from using scatter DMA.
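 *
 * (A worked example of the FIFO rule above, with made-up numbers rather
 * than values for any particular peripheral: a peripheral whose FIFO is
 * 16 words deep would be given a source burst size of 8 and a destination
 * burst size of 16.)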
73 * 74 * Global TODO: 75 * - Break out common code from arch/arm/mach-s3c64xx and share 76 */ 77 #include <linux/device.h> 78 #include <linux/init.h> 79 #include <linux/module.h> 80 #include <linux/interrupt.h> 81 #include <linux/slab.h> 82 #include <linux/dmapool.h> 83 #include <linux/dmaengine.h> 84 #include <linux/amba/bus.h> 85 #include <linux/amba/pl08x.h> 86 #include <linux/debugfs.h> 87 #include <linux/seq_file.h> 88 89 #include <asm/hardware/pl080.h> 90 91 #define DRIVER_NAME "pl08xdmac" 92 93 /** 94 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 95 * @channels: the number of channels available in this variant 96 * @dualmaster: whether this version supports dual AHB masters or not. 97 */ 98 struct vendor_data { 99 u8 channels; 100 bool dualmaster; 101 }; 102 103 /* 104 * PL08X private data structures 105 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, 106 * start & end do not - their bus bit info is in cctl. Also note that these 107 * are fixed 32-bit quantities. 108 */ 109 struct pl08x_lli { 110 u32 src; 111 u32 dst; 112 u32 lli; 113 u32 cctl; 114 }; 115 116 /** 117 * struct pl08x_driver_data - the local state holder for the PL08x 118 * @slave: slave engine for this instance 119 * @memcpy: memcpy engine for this instance 120 * @base: virtual memory base (remapped) for the PL08x 121 * @adev: the corresponding AMBA (PrimeCell) bus entry 122 * @vd: vendor data for this PL08x variant 123 * @pd: platform data passed in from the platform/machine 124 * @phy_chans: array of data for the physical channels 125 * @pool: a pool for the LLI descriptors 126 * @pool_ctr: counter of LLIs in the pool 127 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches 128 * @mem_buses: set to indicate memory transfers on AHB2. 129 * @lock: a spinlock for this struct 130 */ 131 struct pl08x_driver_data { 132 struct dma_device slave; 133 struct dma_device memcpy; 134 void __iomem *base; 135 struct amba_device *adev; 136 const struct vendor_data *vd; 137 struct pl08x_platform_data *pd; 138 struct pl08x_phy_chan *phy_chans; 139 struct dma_pool *pool; 140 int pool_ctr; 141 u8 lli_buses; 142 u8 mem_buses; 143 spinlock_t lock; 144 }; 145 146 /* 147 * PL08X specific defines 148 */ 149 150 /* 151 * Memory boundaries: the manual for PL08x says that the controller 152 * cannot read past a 1KiB boundary, so these defines are used to 153 * create transfer LLIs that do not cross such boundaries. 
154 */ 155 #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 156 #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 157 158 /* Minimum period between work queue runs */ 159 #define PL08X_WQ_PERIODMIN 20 160 161 /* Size (bytes) of each LLI buffer allocated for one transfer */ 162 # define PL08X_LLI_TSFR_SIZE 0x2000 163 164 /* Maximum times we call dma_pool_alloc on this pool without freeing */ 165 #define PL08X_MAX_ALLOCS 0x40 166 #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) 167 #define PL08X_ALIGN 8 168 169 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 170 { 171 return container_of(chan, struct pl08x_dma_chan, chan); 172 } 173 174 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 175 { 176 return container_of(tx, struct pl08x_txd, tx); 177 } 178 179 /* 180 * Physical channel handling 181 */ 182 183 /* Whether a certain channel is busy or not */ 184 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 185 { 186 unsigned int val; 187 188 val = readl(ch->base + PL080_CH_CONFIG); 189 return val & PL080_CONFIG_ACTIVE; 190 } 191 192 /* 193 * Set the initial DMA register values i.e. those for the first LLI 194 * The next LLI pointer and the configuration interrupt bit have 195 * been set when the LLIs were constructed. Poke them into the hardware 196 * and start the transfer. 197 */ 198 static void pl08x_start_txd(struct pl08x_dma_chan *plchan, 199 struct pl08x_txd *txd) 200 { 201 struct pl08x_driver_data *pl08x = plchan->host; 202 struct pl08x_phy_chan *phychan = plchan->phychan; 203 struct pl08x_lli *lli = &txd->llis_va[0]; 204 u32 val; 205 206 plchan->at = txd; 207 208 /* Wait for channel inactive */ 209 while (pl08x_phy_channel_busy(phychan)) 210 cpu_relax(); 211 212 dev_vdbg(&pl08x->adev->dev, 213 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 214 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 215 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, 216 txd->ccfg); 217 218 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); 219 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); 220 writel(lli->lli, phychan->base + PL080_CH_LLI); 221 writel(lli->cctl, phychan->base + PL080_CH_CONTROL); 222 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); 223 224 /* Enable the DMA channel */ 225 /* Do not access config register until channel shows as disabled */ 226 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) 227 cpu_relax(); 228 229 /* Do not access config register until channel shows as inactive */ 230 val = readl(phychan->base + PL080_CH_CONFIG); 231 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 232 val = readl(phychan->base + PL080_CH_CONFIG); 233 234 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); 235 } 236 237 /* 238 * Overall DMAC remains enabled always. 239 * 240 * Disabling individual channels could lose data. 
241 * 242 * Disable the peripheral DMA after disabling the DMAC in order to allow 243 * the DMAC FIFO to drain, and hence allow the channel to show inactive 244 */ 245 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 246 { 247 u32 val; 248 249 /* Set the HALT bit and wait for the FIFO to drain */ 250 val = readl(ch->base + PL080_CH_CONFIG); 251 val |= PL080_CONFIG_HALT; 252 writel(val, ch->base + PL080_CH_CONFIG); 253 254 /* Wait for channel inactive */ 255 while (pl08x_phy_channel_busy(ch)) 256 cpu_relax(); 257 } 258 259 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 260 { 261 u32 val; 262 263 /* Clear the HALT bit */ 264 val = readl(ch->base + PL080_CH_CONFIG); 265 val &= ~PL080_CONFIG_HALT; 266 writel(val, ch->base + PL080_CH_CONFIG); 267 } 268 269 270 /* Stops the channel */ 271 static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) 272 { 273 u32 val; 274 275 pl08x_pause_phy_chan(ch); 276 277 /* Disable channel */ 278 val = readl(ch->base + PL080_CH_CONFIG); 279 val &= ~PL080_CONFIG_ENABLE; 280 val &= ~PL080_CONFIG_ERR_IRQ_MASK; 281 val &= ~PL080_CONFIG_TC_IRQ_MASK; 282 writel(val, ch->base + PL080_CH_CONFIG); 283 } 284 285 static inline u32 get_bytes_in_cctl(u32 cctl) 286 { 287 /* The source width defines the number of bytes */ 288 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 289 290 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 291 case PL080_WIDTH_8BIT: 292 break; 293 case PL080_WIDTH_16BIT: 294 bytes *= 2; 295 break; 296 case PL080_WIDTH_32BIT: 297 bytes *= 4; 298 break; 299 } 300 return bytes; 301 } 302 303 /* The channel should be paused when calling this */ 304 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 305 { 306 struct pl08x_phy_chan *ch; 307 struct pl08x_txd *txd; 308 unsigned long flags; 309 size_t bytes = 0; 310 311 spin_lock_irqsave(&plchan->lock, flags); 312 ch = plchan->phychan; 313 txd = plchan->at; 314 315 /* 316 * Follow the LLIs to get the number of remaining 317 * bytes in the currently active transaction. 318 */ 319 if (ch && txd) { 320 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; 321 322 /* First get the remaining bytes in the active transfer */ 323 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 324 325 if (clli) { 326 struct pl08x_lli *llis_va = txd->llis_va; 327 dma_addr_t llis_bus = txd->llis_bus; 328 int index; 329 330 BUG_ON(clli < llis_bus || clli >= llis_bus + 331 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); 332 333 /* 334 * Locate the next LLI - as this is an array, 335 * it's simple maths to find. 336 */ 337 index = (clli - llis_bus) / sizeof(struct pl08x_lli); 338 339 for (; index < MAX_NUM_TSFR_LLIS; index++) { 340 bytes += get_bytes_in_cctl(llis_va[index].cctl); 341 342 /* 343 * A LLI pointer of 0 terminates the LLI list 344 */ 345 if (!llis_va[index].lli) 346 break; 347 } 348 } 349 } 350 351 /* Sum up all queued transactions */ 352 if (!list_empty(&plchan->pend_list)) { 353 struct pl08x_txd *txdi; 354 list_for_each_entry(txdi, &plchan->pend_list, node) { 355 bytes += txdi->len; 356 } 357 } 358 359 spin_unlock_irqrestore(&plchan->lock, flags); 360 361 return bytes; 362 } 363 364 /* 365 * Allocate a physical channel for a virtual channel 366 * 367 * Try to locate a physical channel to be used for this transfer. If all 368 * are taken return NULL and the requester will have to cope by using 369 * some fallback PIO mode or retrying later. 
370 */ 371 static struct pl08x_phy_chan * 372 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 373 struct pl08x_dma_chan *virt_chan) 374 { 375 struct pl08x_phy_chan *ch = NULL; 376 unsigned long flags; 377 int i; 378 379 for (i = 0; i < pl08x->vd->channels; i++) { 380 ch = &pl08x->phy_chans[i]; 381 382 spin_lock_irqsave(&ch->lock, flags); 383 384 if (!ch->serving) { 385 ch->serving = virt_chan; 386 ch->signal = -1; 387 spin_unlock_irqrestore(&ch->lock, flags); 388 break; 389 } 390 391 spin_unlock_irqrestore(&ch->lock, flags); 392 } 393 394 if (i == pl08x->vd->channels) { 395 /* No physical channel available, cope with it */ 396 return NULL; 397 } 398 399 return ch; 400 } 401 402 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 403 struct pl08x_phy_chan *ch) 404 { 405 unsigned long flags; 406 407 /* Stop the channel and clear its interrupts */ 408 pl08x_stop_phy_chan(ch); 409 writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); 410 writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); 411 412 /* Mark it as free */ 413 spin_lock_irqsave(&ch->lock, flags); 414 ch->serving = NULL; 415 spin_unlock_irqrestore(&ch->lock, flags); 416 } 417 418 /* 419 * LLI handling 420 */ 421 422 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 423 { 424 switch (coded) { 425 case PL080_WIDTH_8BIT: 426 return 1; 427 case PL080_WIDTH_16BIT: 428 return 2; 429 case PL080_WIDTH_32BIT: 430 return 4; 431 default: 432 break; 433 } 434 BUG(); 435 return 0; 436 } 437 438 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 439 size_t tsize) 440 { 441 u32 retbits = cctl; 442 443 /* Remove all src, dst and transfer size bits */ 444 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 445 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 446 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 447 448 /* Then set the bits according to the parameters */ 449 switch (srcwidth) { 450 case 1: 451 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 452 break; 453 case 2: 454 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 455 break; 456 case 4: 457 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 458 break; 459 default: 460 BUG(); 461 break; 462 } 463 464 switch (dstwidth) { 465 case 1: 466 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 467 break; 468 case 2: 469 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 470 break; 471 case 4: 472 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 473 break; 474 default: 475 BUG(); 476 break; 477 } 478 479 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 480 return retbits; 481 } 482 483 struct pl08x_lli_build_data { 484 struct pl08x_txd *txd; 485 struct pl08x_driver_data *pl08x; 486 struct pl08x_bus_data srcbus; 487 struct pl08x_bus_data dstbus; 488 size_t remainder; 489 }; 490 491 /* 492 * Autoselect a master bus to use for the transfer this prefers the 493 * destination bus if both available if fixed address on one bus the 494 * other will be chosen 495 */ 496 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 497 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 498 { 499 if (!(cctl & PL080_CONTROL_DST_INCR)) { 500 *mbus = &bd->srcbus; 501 *sbus = &bd->dstbus; 502 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 503 *mbus = &bd->dstbus; 504 *sbus = &bd->srcbus; 505 } else { 506 if (bd->dstbus.buswidth == 4) { 507 *mbus = &bd->dstbus; 508 *sbus = &bd->srcbus; 509 } else if (bd->srcbus.buswidth == 4) { 510 *mbus = &bd->srcbus; 511 *sbus = 
&bd->dstbus; 512 } else if (bd->dstbus.buswidth == 2) { 513 *mbus = &bd->dstbus; 514 *sbus = &bd->srcbus; 515 } else if (bd->srcbus.buswidth == 2) { 516 *mbus = &bd->srcbus; 517 *sbus = &bd->dstbus; 518 } else { 519 /* bd->srcbus.buswidth == 1 */ 520 *mbus = &bd->dstbus; 521 *sbus = &bd->srcbus; 522 } 523 } 524 } 525 526 /* 527 * Fills in one LLI for a certain transfer descriptor and advance the counter 528 */ 529 static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, 530 int num_llis, int len, u32 cctl) 531 { 532 struct pl08x_lli *llis_va = bd->txd->llis_va; 533 dma_addr_t llis_bus = bd->txd->llis_bus; 534 535 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 536 537 llis_va[num_llis].cctl = cctl; 538 llis_va[num_llis].src = bd->srcbus.addr; 539 llis_va[num_llis].dst = bd->dstbus.addr; 540 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 541 if (bd->pl08x->lli_buses & PL08X_AHB2) 542 llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; 543 544 if (cctl & PL080_CONTROL_SRC_INCR) 545 bd->srcbus.addr += len; 546 if (cctl & PL080_CONTROL_DST_INCR) 547 bd->dstbus.addr += len; 548 549 BUG_ON(bd->remainder < len); 550 551 bd->remainder -= len; 552 } 553 554 /* 555 * Return number of bytes to fill to boundary, or len. 556 * This calculation works for any value of addr. 557 */ 558 static inline size_t pl08x_pre_boundary(u32 addr, size_t len) 559 { 560 size_t boundary_len = PL08X_BOUNDARY_SIZE - 561 (addr & (PL08X_BOUNDARY_SIZE - 1)); 562 563 return min(boundary_len, len); 564 } 565 566 /* 567 * This fills in the table of LLIs for the transfer descriptor 568 * Note that we assume we never have to change the burst sizes 569 * Return 0 for error 570 */ 571 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 572 struct pl08x_txd *txd) 573 { 574 struct pl08x_bus_data *mbus, *sbus; 575 struct pl08x_lli_build_data bd; 576 int num_llis = 0; 577 u32 cctl; 578 size_t max_bytes_per_lli; 579 size_t total_bytes = 0; 580 struct pl08x_lli *llis_va; 581 582 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 583 &txd->llis_bus); 584 if (!txd->llis_va) { 585 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 586 return 0; 587 } 588 589 pl08x->pool_ctr++; 590 591 /* Get the default CCTL */ 592 cctl = txd->cctl; 593 594 bd.txd = txd; 595 bd.pl08x = pl08x; 596 bd.srcbus.addr = txd->src_addr; 597 bd.dstbus.addr = txd->dst_addr; 598 599 /* Find maximum width of the source bus */ 600 bd.srcbus.maxwidth = 601 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 602 PL080_CONTROL_SWIDTH_SHIFT); 603 604 /* Find maximum width of the destination bus */ 605 bd.dstbus.maxwidth = 606 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 607 PL080_CONTROL_DWIDTH_SHIFT); 608 609 /* Set up the bus widths to the maximum */ 610 bd.srcbus.buswidth = bd.srcbus.maxwidth; 611 bd.dstbus.buswidth = bd.dstbus.maxwidth; 612 dev_vdbg(&pl08x->adev->dev, 613 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", 614 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); 615 616 617 /* 618 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 619 */ 620 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * 621 PL080_CONTROL_TRANSFER_SIZE_MASK; 622 dev_vdbg(&pl08x->adev->dev, 623 "%s max bytes per lli = %zu\n", 624 __func__, max_bytes_per_lli); 625 626 /* We need to count this down to zero */ 627 bd.remainder = txd->len; 628 dev_vdbg(&pl08x->adev->dev, 629 "%s remainder = %zu\n", 630 __func__, bd.remainder); 631 632 /* 633 * Choose bus to 
align to 634 * - prefers destination bus if both available 635 * - if fixed address on one bus chooses other 636 */ 637 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 638 639 if (txd->len < mbus->buswidth) { 640 /* Less than a bus width available - send as single bytes */ 641 while (bd.remainder) { 642 dev_vdbg(&pl08x->adev->dev, 643 "%s single byte LLIs for a transfer of " 644 "less than a bus width (remain 0x%08x)\n", 645 __func__, bd.remainder); 646 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 647 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 648 total_bytes++; 649 } 650 } else { 651 /* Make one byte LLIs until master bus is aligned */ 652 while ((mbus->addr) % (mbus->buswidth)) { 653 dev_vdbg(&pl08x->adev->dev, 654 "%s adjustment lli for less than bus width " 655 "(remain 0x%08x)\n", 656 __func__, bd.remainder); 657 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 658 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 659 total_bytes++; 660 } 661 662 /* 663 * Master now aligned 664 * - if slave is not then we must set its width down 665 */ 666 if (sbus->addr % sbus->buswidth) { 667 dev_dbg(&pl08x->adev->dev, 668 "%s set down bus width to one byte\n", 669 __func__); 670 671 sbus->buswidth = 1; 672 } 673 674 /* 675 * Make largest possible LLIs until less than one bus 676 * width left 677 */ 678 while (bd.remainder > (mbus->buswidth - 1)) { 679 size_t lli_len, target_len, tsize, odd_bytes; 680 681 /* 682 * If enough left try to send max possible, 683 * otherwise try to send the remainder 684 */ 685 target_len = min(bd.remainder, max_bytes_per_lli); 686 687 /* 688 * Set bus lengths for incrementing buses to the 689 * number of bytes which fill to next memory boundary, 690 * limiting on the target length calculated above. 691 */ 692 if (cctl & PL080_CONTROL_SRC_INCR) 693 bd.srcbus.fill_bytes = 694 pl08x_pre_boundary(bd.srcbus.addr, 695 target_len); 696 else 697 bd.srcbus.fill_bytes = target_len; 698 699 if (cctl & PL080_CONTROL_DST_INCR) 700 bd.dstbus.fill_bytes = 701 pl08x_pre_boundary(bd.dstbus.addr, 702 target_len); 703 else 704 bd.dstbus.fill_bytes = target_len; 705 706 /* Find the nearest */ 707 lli_len = min(bd.srcbus.fill_bytes, 708 bd.dstbus.fill_bytes); 709 710 BUG_ON(lli_len > bd.remainder); 711 712 if (lli_len <= 0) { 713 dev_err(&pl08x->adev->dev, 714 "%s lli_len is %zu, <= 0\n", 715 __func__, lli_len); 716 return 0; 717 } 718 719 if (lli_len == target_len) { 720 /* 721 * Can send what we wanted. 722 * Maintain alignment 723 */ 724 lli_len = (lli_len/mbus->buswidth) * 725 mbus->buswidth; 726 odd_bytes = 0; 727 } else { 728 /* 729 * So now we know how many bytes to transfer 730 * to get to the nearest boundary. The next 731 * LLI will past the boundary. However, we 732 * may be working to a boundary on the slave 733 * bus. We need to ensure the master stays 734 * aligned, and that we are working in 735 * multiples of the bus widths. 736 */ 737 odd_bytes = lli_len % mbus->buswidth; 738 lli_len -= odd_bytes; 739 740 } 741 742 if (lli_len) { 743 /* 744 * Check against minimum bus alignment: 745 * Calculate actual transfer size in relation 746 * to bus width an get a maximum remainder of 747 * the smallest bus width - 1 748 */ 749 /* FIXME: use round_down()? */ 750 tsize = lli_len / min(mbus->buswidth, 751 sbus->buswidth); 752 lli_len = tsize * min(mbus->buswidth, 753 sbus->buswidth); 754 755 if (target_len != lli_len) { 756 dev_vdbg(&pl08x->adev->dev, 757 "%s can't send what we want. 
Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", 758 __func__, target_len, lli_len, txd->len); 759 } 760 761 cctl = pl08x_cctl_bits(cctl, 762 bd.srcbus.buswidth, 763 bd.dstbus.buswidth, 764 tsize); 765 766 dev_vdbg(&pl08x->adev->dev, 767 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", 768 __func__, lli_len, bd.remainder); 769 pl08x_fill_lli_for_desc(&bd, num_llis++, 770 lli_len, cctl); 771 total_bytes += lli_len; 772 } 773 774 775 if (odd_bytes) { 776 /* 777 * Creep past the boundary, maintaining 778 * master alignment 779 */ 780 int j; 781 for (j = 0; (j < mbus->buswidth) 782 && (bd.remainder); j++) { 783 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 784 dev_vdbg(&pl08x->adev->dev, 785 "%s align with boundary, single byte (remain 0x%08zx)\n", 786 __func__, bd.remainder); 787 pl08x_fill_lli_for_desc(&bd, 788 num_llis++, 1, cctl); 789 total_bytes++; 790 } 791 } 792 } 793 794 /* 795 * Send any odd bytes 796 */ 797 while (bd.remainder) { 798 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 799 dev_vdbg(&pl08x->adev->dev, 800 "%s align with boundary, single odd byte (remain %zu)\n", 801 __func__, bd.remainder); 802 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 803 total_bytes++; 804 } 805 } 806 if (total_bytes != txd->len) { 807 dev_err(&pl08x->adev->dev, 808 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 809 __func__, total_bytes, txd->len); 810 return 0; 811 } 812 813 if (num_llis >= MAX_NUM_TSFR_LLIS) { 814 dev_err(&pl08x->adev->dev, 815 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 816 __func__, (u32) MAX_NUM_TSFR_LLIS); 817 return 0; 818 } 819 820 llis_va = txd->llis_va; 821 /* The final LLI terminates the LLI. */ 822 llis_va[num_llis - 1].lli = 0; 823 /* The final LLI element shall also fire an interrupt. 
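 *
 * As a purely schematic illustration (not dumped from real hardware), a
 * finished three-entry chain for a memcpy ends up looking like:
 *
 *   llis_va[0]: src = S,      dst = D,      lli = llis_bus + 1 * 16 (| LM), cctl = widths | tsize
 *   llis_va[1]: src = S + n,  dst = D + n,  lli = llis_bus + 2 * 16 (| LM), cctl = widths | tsize
 *   llis_va[2]: src = S + 2n, dst = D + 2n, lli = 0,                        cctl = widths | tsize | TC_IRQ_EN
 *
 * where n is the number of bytes covered by an entry, 16 is
 * sizeof(struct pl08x_lli) and LM is the optional PL080_LLI_LM_AHB2 bit
 * taken from lli_buses.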
*/ 824 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; 825 826 #ifdef VERBOSE_DEBUG 827 { 828 int i; 829 830 for (i = 0; i < num_llis; i++) { 831 dev_vdbg(&pl08x->adev->dev, 832 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", 833 i, 834 &llis_va[i], 835 llis_va[i].src, 836 llis_va[i].dst, 837 llis_va[i].cctl, 838 llis_va[i].lli 839 ); 840 } 841 } 842 #endif 843 844 return num_llis; 845 } 846 847 /* You should call this with the struct pl08x lock held */ 848 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 849 struct pl08x_txd *txd) 850 { 851 /* Free the LLI */ 852 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 853 854 pl08x->pool_ctr--; 855 856 kfree(txd); 857 } 858 859 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 860 struct pl08x_dma_chan *plchan) 861 { 862 struct pl08x_txd *txdi = NULL; 863 struct pl08x_txd *next; 864 865 if (!list_empty(&plchan->pend_list)) { 866 list_for_each_entry_safe(txdi, 867 next, &plchan->pend_list, node) { 868 list_del(&txdi->node); 869 pl08x_free_txd(pl08x, txdi); 870 } 871 } 872 } 873 874 /* 875 * The DMA ENGINE API 876 */ 877 static int pl08x_alloc_chan_resources(struct dma_chan *chan) 878 { 879 return 0; 880 } 881 882 static void pl08x_free_chan_resources(struct dma_chan *chan) 883 { 884 } 885 886 /* 887 * This should be called with the channel plchan->lock held 888 */ 889 static int prep_phy_channel(struct pl08x_dma_chan *plchan, 890 struct pl08x_txd *txd) 891 { 892 struct pl08x_driver_data *pl08x = plchan->host; 893 struct pl08x_phy_chan *ch; 894 int ret; 895 896 /* Check if we already have a channel */ 897 if (plchan->phychan) 898 return 0; 899 900 ch = pl08x_get_phy_channel(pl08x, plchan); 901 if (!ch) { 902 /* No physical channel available, cope with it */ 903 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 904 return -EBUSY; 905 } 906 907 /* 908 * OK we have a physical channel: for memcpy() this is all we 909 * need, but for slaves the physical signals may be muxed! 910 * Can the platform allow us to use this channel? 
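 *
 * (For reference: the platform's get_signal() hook is expected to return
 * the mux signal number it assigned, or a negative value if the mux cannot
 * be set up right now; whatever was allocated is handed back through
 * put_signal() when the physical channel is released - see
 * release_phy_channel().)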
911 */ 912 if (plchan->slave && 913 ch->signal < 0 && 914 pl08x->pd->get_signal) { 915 ret = pl08x->pd->get_signal(plchan); 916 if (ret < 0) { 917 dev_dbg(&pl08x->adev->dev, 918 "unable to use physical channel %d for transfer on %s due to platform restrictions\n", 919 ch->id, plchan->name); 920 /* Release physical channel & return */ 921 pl08x_put_phy_channel(pl08x, ch); 922 return -EBUSY; 923 } 924 ch->signal = ret; 925 926 /* Assign the flow control signal to this channel */ 927 if (txd->direction == DMA_TO_DEVICE) 928 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; 929 else if (txd->direction == DMA_FROM_DEVICE) 930 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; 931 } 932 933 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 934 ch->id, 935 ch->signal, 936 plchan->name); 937 938 plchan->phychan_hold++; 939 plchan->phychan = ch; 940 941 return 0; 942 } 943 944 static void release_phy_channel(struct pl08x_dma_chan *plchan) 945 { 946 struct pl08x_driver_data *pl08x = plchan->host; 947 948 if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { 949 pl08x->pd->put_signal(plchan); 950 plchan->phychan->signal = -1; 951 } 952 pl08x_put_phy_channel(pl08x, plchan->phychan); 953 plchan->phychan = NULL; 954 } 955 956 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 957 { 958 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 959 struct pl08x_txd *txd = to_pl08x_txd(tx); 960 unsigned long flags; 961 962 spin_lock_irqsave(&plchan->lock, flags); 963 964 plchan->chan.cookie += 1; 965 if (plchan->chan.cookie < 0) 966 plchan->chan.cookie = 1; 967 tx->cookie = plchan->chan.cookie; 968 969 /* Put this onto the pending list */ 970 list_add_tail(&txd->node, &plchan->pend_list); 971 972 /* 973 * If there was no physical channel available for this memcpy, 974 * stack the request up and indicate that the channel is waiting 975 * for a free physical channel. 976 */ 977 if (!plchan->slave && !plchan->phychan) { 978 /* Do this memcpy whenever there is a channel ready */ 979 plchan->state = PL08X_CHAN_WAITING; 980 plchan->waiting = txd; 981 } else { 982 plchan->phychan_hold--; 983 } 984 985 spin_unlock_irqrestore(&plchan->lock, flags); 986 987 return tx->cookie; 988 } 989 990 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 991 struct dma_chan *chan, unsigned long flags) 992 { 993 struct dma_async_tx_descriptor *retval = NULL; 994 995 return retval; 996 } 997 998 /* 999 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1000 * If slaves are relying on interrupts to signal completion this function 1001 * must not be called with interrupts disabled. 
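 *
 * A minimal polling sketch from a client's point of view (illustrative
 * only; the cookie is the one handed back by tx_submit()):
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		msleep(1);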
1002 */ 1003 static enum dma_status 1004 pl08x_dma_tx_status(struct dma_chan *chan, 1005 dma_cookie_t cookie, 1006 struct dma_tx_state *txstate) 1007 { 1008 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1009 dma_cookie_t last_used; 1010 dma_cookie_t last_complete; 1011 enum dma_status ret; 1012 u32 bytesleft = 0; 1013 1014 last_used = plchan->chan.cookie; 1015 last_complete = plchan->lc; 1016 1017 ret = dma_async_is_complete(cookie, last_complete, last_used); 1018 if (ret == DMA_SUCCESS) { 1019 dma_set_tx_state(txstate, last_complete, last_used, 0); 1020 return ret; 1021 } 1022 1023 /* 1024 * This cookie not complete yet 1025 */ 1026 last_used = plchan->chan.cookie; 1027 last_complete = plchan->lc; 1028 1029 /* Get number of bytes left in the active transactions and queue */ 1030 bytesleft = pl08x_getbytes_chan(plchan); 1031 1032 dma_set_tx_state(txstate, last_complete, last_used, 1033 bytesleft); 1034 1035 if (plchan->state == PL08X_CHAN_PAUSED) 1036 return DMA_PAUSED; 1037 1038 /* Whether waiting or running, we're in progress */ 1039 return DMA_IN_PROGRESS; 1040 } 1041 1042 /* PrimeCell DMA extension */ 1043 struct burst_table { 1044 int burstwords; 1045 u32 reg; 1046 }; 1047 1048 static const struct burst_table burst_sizes[] = { 1049 { 1050 .burstwords = 256, 1051 .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | 1052 (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), 1053 }, 1054 { 1055 .burstwords = 128, 1056 .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | 1057 (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), 1058 }, 1059 { 1060 .burstwords = 64, 1061 .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | 1062 (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), 1063 }, 1064 { 1065 .burstwords = 32, 1066 .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | 1067 (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), 1068 }, 1069 { 1070 .burstwords = 16, 1071 .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | 1072 (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), 1073 }, 1074 { 1075 .burstwords = 8, 1076 .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | 1077 (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), 1078 }, 1079 { 1080 .burstwords = 4, 1081 .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | 1082 (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), 1083 }, 1084 { 1085 .burstwords = 1, 1086 .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1087 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), 1088 }, 1089 }; 1090 1091 static int dma_set_runtime_config(struct dma_chan *chan, 1092 struct dma_slave_config *config) 1093 { 1094 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1095 struct pl08x_driver_data *pl08x = plchan->host; 1096 struct pl08x_channel_data *cd = plchan->cd; 1097 enum dma_slave_buswidth addr_width; 1098 dma_addr_t addr; 1099 u32 maxburst; 1100 u32 cctl = 0; 1101 int i; 1102 1103 if (!plchan->slave) 1104 return -EINVAL; 1105 1106 /* Transfer direction */ 1107 plchan->runtime_direction = config->direction; 1108 if (config->direction == DMA_TO_DEVICE) { 1109 addr = config->dst_addr; 1110 addr_width = config->dst_addr_width; 1111 maxburst = config->dst_maxburst; 1112 } else if (config->direction == DMA_FROM_DEVICE) { 1113 addr = config->src_addr; 1114 addr_width = config->src_addr_width; 1115 maxburst = config->src_maxburst; 1116 } else { 1117 dev_err(&pl08x->adev->dev, 1118 "bad runtime_config: alien transfer direction\n"); 1119 return -EINVAL; 1120 } 1121 1122 switch (addr_width) { 1123 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1124 cctl |= 
(PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1125 (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); 1126 break; 1127 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1128 cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1129 (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); 1130 break; 1131 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1132 cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1133 (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); 1134 break; 1135 default: 1136 dev_err(&pl08x->adev->dev, 1137 "bad runtime_config: alien address width\n"); 1138 return -EINVAL; 1139 } 1140 1141 /* 1142 * Now decide on a maxburst: 1143 * If this channel will only request single transfers, set this 1144 * down to ONE element. Also select one element if no maxburst 1145 * is specified. 1146 */ 1147 if (plchan->cd->single || maxburst == 0) { 1148 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1149 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); 1150 } else { 1151 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1152 if (burst_sizes[i].burstwords <= maxburst) 1153 break; 1154 cctl |= burst_sizes[i].reg; 1155 } 1156 1157 plchan->runtime_addr = addr; 1158 1159 /* Modify the default channel data to fit PrimeCell request */ 1160 cd->cctl = cctl; 1161 1162 dev_dbg(&pl08x->adev->dev, 1163 "configured channel %s (%s) for %s, data width %d, " 1164 "maxburst %d words, LE, CCTL=0x%08x\n", 1165 dma_chan_name(chan), plchan->name, 1166 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1167 addr_width, 1168 maxburst, 1169 cctl); 1170 1171 return 0; 1172 } 1173 1174 /* 1175 * Slave transactions callback to the slave device to allow 1176 * synchronization of slave DMA signals with the DMAC enable 1177 */ 1178 static void pl08x_issue_pending(struct dma_chan *chan) 1179 { 1180 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1181 unsigned long flags; 1182 1183 spin_lock_irqsave(&plchan->lock, flags); 1184 /* Something is already active, or we're waiting for a channel... */ 1185 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { 1186 spin_unlock_irqrestore(&plchan->lock, flags); 1187 return; 1188 } 1189 1190 /* Take the first element in the queue and execute it */ 1191 if (!list_empty(&plchan->pend_list)) { 1192 struct pl08x_txd *next; 1193 1194 next = list_first_entry(&plchan->pend_list, 1195 struct pl08x_txd, 1196 node); 1197 list_del(&next->node); 1198 plchan->state = PL08X_CHAN_RUNNING; 1199 1200 pl08x_start_txd(plchan, next); 1201 } 1202 1203 spin_unlock_irqrestore(&plchan->lock, flags); 1204 } 1205 1206 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1207 struct pl08x_txd *txd) 1208 { 1209 struct pl08x_driver_data *pl08x = plchan->host; 1210 unsigned long flags; 1211 int num_llis, ret; 1212 1213 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1214 if (!num_llis) { 1215 kfree(txd); 1216 return -EINVAL; 1217 } 1218 1219 spin_lock_irqsave(&plchan->lock, flags); 1220 1221 /* 1222 * See if we already have a physical channel allocated, 1223 * else this is the time to try to get one. 1224 */ 1225 ret = prep_phy_channel(plchan, txd); 1226 if (ret) { 1227 /* 1228 * No physical channel was available. 1229 * 1230 * memcpy transfers can be sorted out at submission time. 1231 * 1232 * Slave transfers may have been denied due to platform 1233 * channel muxing restrictions. Since there is no guarantee 1234 * that this will ever be resolved, and the signal must be 1235 * acquired AFTER acquiring the physical channel, we will let 1236 * them be NACK:ed with -EBUSY here. 
The drivers can retry 1237 * the prep() call if they are eager on doing this using DMA. 1238 */ 1239 if (plchan->slave) { 1240 pl08x_free_txd_list(pl08x, plchan); 1241 pl08x_free_txd(pl08x, txd); 1242 spin_unlock_irqrestore(&plchan->lock, flags); 1243 return -EBUSY; 1244 } 1245 } else 1246 /* 1247 * Else we're all set, paused and ready to roll, status 1248 * will switch to PL08X_CHAN_RUNNING when we call 1249 * issue_pending(). If there is something running on the 1250 * channel already we don't change its state. 1251 */ 1252 if (plchan->state == PL08X_CHAN_IDLE) 1253 plchan->state = PL08X_CHAN_PAUSED; 1254 1255 spin_unlock_irqrestore(&plchan->lock, flags); 1256 1257 return 0; 1258 } 1259 1260 /* 1261 * Given the source and destination available bus masks, select which 1262 * will be routed to each port. We try to have source and destination 1263 * on separate ports, but always respect the allowable settings. 1264 */ 1265 static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) 1266 { 1267 u32 cctl = 0; 1268 1269 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1270 cctl |= PL080_CONTROL_DST_AHB2; 1271 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1272 cctl |= PL080_CONTROL_SRC_AHB2; 1273 1274 return cctl; 1275 } 1276 1277 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1278 unsigned long flags) 1279 { 1280 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1281 1282 if (txd) { 1283 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); 1284 txd->tx.flags = flags; 1285 txd->tx.tx_submit = pl08x_tx_submit; 1286 INIT_LIST_HEAD(&txd->node); 1287 1288 /* Always enable error and terminal interrupts */ 1289 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1290 PL080_CONFIG_TC_IRQ_MASK; 1291 } 1292 return txd; 1293 } 1294 1295 /* 1296 * Initialize a descriptor to be used by memcpy submit 1297 */ 1298 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1299 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1300 size_t len, unsigned long flags) 1301 { 1302 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1303 struct pl08x_driver_data *pl08x = plchan->host; 1304 struct pl08x_txd *txd; 1305 int ret; 1306 1307 txd = pl08x_get_txd(plchan, flags); 1308 if (!txd) { 1309 dev_err(&pl08x->adev->dev, 1310 "%s no memory for descriptor\n", __func__); 1311 return NULL; 1312 } 1313 1314 txd->direction = DMA_NONE; 1315 txd->src_addr = src; 1316 txd->dst_addr = dest; 1317 txd->len = len; 1318 1319 /* Set platform data for m2m */ 1320 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1321 txd->cctl = pl08x->pd->memcpy_channel.cctl & 1322 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1323 1324 /* Both to be incremented or the code will break */ 1325 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1326 1327 if (pl08x->vd->dualmaster) 1328 txd->cctl |= pl08x_select_bus(pl08x, 1329 pl08x->mem_buses, pl08x->mem_buses); 1330 1331 ret = pl08x_prep_channel_resources(plchan, txd); 1332 if (ret) 1333 return NULL; 1334 1335 return &txd->tx; 1336 } 1337 1338 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1339 struct dma_chan *chan, struct scatterlist *sgl, 1340 unsigned int sg_len, enum dma_data_direction direction, 1341 unsigned long flags) 1342 { 1343 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1344 struct pl08x_driver_data *pl08x = plchan->host; 1345 struct pl08x_txd *txd; 1346 u8 src_buses, dst_buses; 1347 int ret; 1348 1349 /* 1350 * Current 
implementation ASSUMES only one sg 1351 */ 1352 if (sg_len != 1) { 1353 dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n", 1354 __func__); 1355 BUG(); 1356 } 1357 1358 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1359 __func__, sgl->length, plchan->name); 1360 1361 txd = pl08x_get_txd(plchan, flags); 1362 if (!txd) { 1363 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1364 return NULL; 1365 } 1366 1367 if (direction != plchan->runtime_direction) 1368 dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1369 "the direction configured for the PrimeCell\n", 1370 __func__); 1371 1372 /* 1373 * Set up addresses, the PrimeCell configured address 1374 * will take precedence since this may configure the 1375 * channel target address dynamically at runtime. 1376 */ 1377 txd->direction = direction; 1378 txd->len = sgl->length; 1379 1380 txd->cctl = plchan->cd->cctl & 1381 ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1382 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1383 PL080_CONTROL_PROT_MASK); 1384 1385 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1386 txd->cctl |= PL080_CONTROL_PROT_SYS; 1387 1388 if (direction == DMA_TO_DEVICE) { 1389 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1390 txd->cctl |= PL080_CONTROL_SRC_INCR; 1391 txd->src_addr = sgl->dma_address; 1392 if (plchan->runtime_addr) 1393 txd->dst_addr = plchan->runtime_addr; 1394 else 1395 txd->dst_addr = plchan->cd->addr; 1396 src_buses = pl08x->mem_buses; 1397 dst_buses = plchan->cd->periph_buses; 1398 } else if (direction == DMA_FROM_DEVICE) { 1399 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1400 txd->cctl |= PL080_CONTROL_DST_INCR; 1401 if (plchan->runtime_addr) 1402 txd->src_addr = plchan->runtime_addr; 1403 else 1404 txd->src_addr = plchan->cd->addr; 1405 txd->dst_addr = sgl->dma_address; 1406 src_buses = plchan->cd->periph_buses; 1407 dst_buses = pl08x->mem_buses; 1408 } else { 1409 dev_err(&pl08x->adev->dev, 1410 "%s direction unsupported\n", __func__); 1411 return NULL; 1412 } 1413 1414 txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); 1415 1416 ret = pl08x_prep_channel_resources(plchan, txd); 1417 if (ret) 1418 return NULL; 1419 1420 return &txd->tx; 1421 } 1422 1423 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1424 unsigned long arg) 1425 { 1426 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1427 struct pl08x_driver_data *pl08x = plchan->host; 1428 unsigned long flags; 1429 int ret = 0; 1430 1431 /* Controls applicable to inactive channels */ 1432 if (cmd == DMA_SLAVE_CONFIG) { 1433 return dma_set_runtime_config(chan, 1434 (struct dma_slave_config *)arg); 1435 } 1436 1437 /* 1438 * Anything succeeds on channels with no physical allocation and 1439 * no queued transfers. 
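 *
 * (This is wired up as the channel's dmaengine device_control hook, so a
 * terminate request from a client arrives here as
 * device_control(chan, DMA_TERMINATE_ALL, 0).)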
1440 */ 1441 spin_lock_irqsave(&plchan->lock, flags); 1442 if (!plchan->phychan && !plchan->at) { 1443 spin_unlock_irqrestore(&plchan->lock, flags); 1444 return 0; 1445 } 1446 1447 switch (cmd) { 1448 case DMA_TERMINATE_ALL: 1449 plchan->state = PL08X_CHAN_IDLE; 1450 1451 if (plchan->phychan) { 1452 pl08x_stop_phy_chan(plchan->phychan); 1453 1454 /* 1455 * Mark physical channel as free and free any slave 1456 * signal 1457 */ 1458 release_phy_channel(plchan); 1459 } 1460 /* Dequeue jobs and free LLIs */ 1461 if (plchan->at) { 1462 pl08x_free_txd(pl08x, plchan->at); 1463 plchan->at = NULL; 1464 } 1465 /* Dequeue jobs not yet fired as well */ 1466 pl08x_free_txd_list(pl08x, plchan); 1467 break; 1468 case DMA_PAUSE: 1469 pl08x_pause_phy_chan(plchan->phychan); 1470 plchan->state = PL08X_CHAN_PAUSED; 1471 break; 1472 case DMA_RESUME: 1473 pl08x_resume_phy_chan(plchan->phychan); 1474 plchan->state = PL08X_CHAN_RUNNING; 1475 break; 1476 default: 1477 /* Unknown command */ 1478 ret = -ENXIO; 1479 break; 1480 } 1481 1482 spin_unlock_irqrestore(&plchan->lock, flags); 1483 1484 return ret; 1485 } 1486 1487 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1488 { 1489 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1490 char *name = chan_id; 1491 1492 /* Check that the channel is not taken! */ 1493 if (!strcmp(plchan->name, name)) 1494 return true; 1495 1496 return false; 1497 } 1498 1499 /* 1500 * Just check that the device is there and active 1501 * TODO: turn this bit on/off depending on the number of physical channels 1502 * actually used, if it is zero... well shut it off. That will save some 1503 * power. Cut the clock at the same time. 1504 */ 1505 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1506 { 1507 u32 val; 1508 1509 val = readl(pl08x->base + PL080_CONFIG); 1510 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1511 /* We implicitly clear bit 1 and that means little-endian mode */ 1512 val |= PL080_CONFIG_ENABLE; 1513 writel(val, pl08x->base + PL080_CONFIG); 1514 } 1515 1516 static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1517 { 1518 struct device *dev = txd->tx.chan->device->dev; 1519 1520 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1521 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 1522 dma_unmap_single(dev, txd->src_addr, txd->len, 1523 DMA_TO_DEVICE); 1524 else 1525 dma_unmap_page(dev, txd->src_addr, txd->len, 1526 DMA_TO_DEVICE); 1527 } 1528 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1529 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 1530 dma_unmap_single(dev, txd->dst_addr, txd->len, 1531 DMA_FROM_DEVICE); 1532 else 1533 dma_unmap_page(dev, txd->dst_addr, txd->len, 1534 DMA_FROM_DEVICE); 1535 } 1536 } 1537 1538 static void pl08x_tasklet(unsigned long data) 1539 { 1540 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1541 struct pl08x_driver_data *pl08x = plchan->host; 1542 struct pl08x_txd *txd; 1543 unsigned long flags; 1544 1545 spin_lock_irqsave(&plchan->lock, flags); 1546 1547 txd = plchan->at; 1548 plchan->at = NULL; 1549 1550 if (txd) { 1551 /* Update last completed */ 1552 plchan->lc = txd->tx.cookie; 1553 } 1554 1555 /* If a new descriptor is queued, set it up plchan->at is NULL here */ 1556 if (!list_empty(&plchan->pend_list)) { 1557 struct pl08x_txd *next; 1558 1559 next = list_first_entry(&plchan->pend_list, 1560 struct pl08x_txd, 1561 node); 1562 list_del(&next->node); 1563 1564 pl08x_start_txd(plchan, next); 1565 } else if (plchan->phychan_hold) { 1566 /* 1567 * This channel 
is still in use - we have a new txd being 1568 * prepared and will soon be queued. Don't give up the 1569 * physical channel. 1570 */ 1571 } else { 1572 struct pl08x_dma_chan *waiting = NULL; 1573 1574 /* 1575 * No more jobs, so free up the physical channel 1576 * Free any allocated signal on slave transfers too 1577 */ 1578 release_phy_channel(plchan); 1579 plchan->state = PL08X_CHAN_IDLE; 1580 1581 /* 1582 * And NOW before anyone else can grab that freed-up 1583 * physical channel, see if there is some memcpy pending 1584 * that seriously needs to start because of being stacked 1585 * up while we were choking the physical channels with data. 1586 */ 1587 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1588 chan.device_node) { 1589 if (waiting->state == PL08X_CHAN_WAITING && 1590 waiting->waiting != NULL) { 1591 int ret; 1592 1593 /* This should REALLY not fail now */ 1594 ret = prep_phy_channel(waiting, 1595 waiting->waiting); 1596 BUG_ON(ret); 1597 waiting->phychan_hold--; 1598 waiting->state = PL08X_CHAN_RUNNING; 1599 waiting->waiting = NULL; 1600 pl08x_issue_pending(&waiting->chan); 1601 break; 1602 } 1603 } 1604 } 1605 1606 spin_unlock_irqrestore(&plchan->lock, flags); 1607 1608 if (txd) { 1609 dma_async_tx_callback callback = txd->tx.callback; 1610 void *callback_param = txd->tx.callback_param; 1611 1612 /* Don't try to unmap buffers on slave channels */ 1613 if (!plchan->slave) 1614 pl08x_unmap_buffers(txd); 1615 1616 /* Free the descriptor */ 1617 spin_lock_irqsave(&plchan->lock, flags); 1618 pl08x_free_txd(pl08x, txd); 1619 spin_unlock_irqrestore(&plchan->lock, flags); 1620 1621 /* Callback to signal completion */ 1622 if (callback) 1623 callback(callback_param); 1624 } 1625 } 1626 1627 static irqreturn_t pl08x_irq(int irq, void *dev) 1628 { 1629 struct pl08x_driver_data *pl08x = dev; 1630 u32 mask = 0; 1631 u32 val; 1632 int i; 1633 1634 val = readl(pl08x->base + PL080_ERR_STATUS); 1635 if (val) { 1636 /* An error interrupt (on one or more channels) */ 1637 dev_err(&pl08x->adev->dev, 1638 "%s error interrupt, register value 0x%08x\n", 1639 __func__, val); 1640 /* 1641 * Simply clear ALL PL08X error interrupts, 1642 * regardless of channel and cause 1643 * FIXME: should be 0x00000003 on PL081 really. 1644 */ 1645 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1646 } 1647 val = readl(pl08x->base + PL080_INT_STATUS); 1648 for (i = 0; i < pl08x->vd->channels; i++) { 1649 if ((1 << i) & val) { 1650 /* Locate physical channel */ 1651 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1652 struct pl08x_dma_chan *plchan = phychan->serving; 1653 1654 /* Schedule tasklet on this channel */ 1655 tasklet_schedule(&plchan->tasklet); 1656 1657 mask |= (1 << i); 1658 } 1659 } 1660 /* Clear only the terminal count interrupts on channels we processed */ 1661 writel(mask, pl08x->base + PL080_TC_CLEAR); 1662 1663 return mask ? IRQ_HANDLED : IRQ_NONE; 1664 } 1665 1666 /* 1667 * Initialise the DMAC memcpy/slave channels. 1668 * Make a local wrapper to hold required data 1669 */ 1670 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1671 struct dma_device *dmadev, 1672 unsigned int channels, 1673 bool slave) 1674 { 1675 struct pl08x_dma_chan *chan; 1676 int i; 1677 1678 INIT_LIST_HEAD(&dmadev->channels); 1679 1680 /* 1681 * Register as many memcpy channels as we have physical channels; 1682 * we won't always be able to use them all but the code will have 1683 * to cope with that situation.
1684 */ 1685 for (i = 0; i < channels; i++) { 1686 chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1687 if (!chan) { 1688 dev_err(&pl08x->adev->dev, 1689 "%s no memory for channel\n", __func__); 1690 return -ENOMEM; 1691 } 1692 1693 chan->host = pl08x; 1694 chan->state = PL08X_CHAN_IDLE; 1695 1696 if (slave) { 1697 chan->slave = true; 1698 chan->name = pl08x->pd->slave_channels[i].bus_id; 1699 chan->cd = &pl08x->pd->slave_channels[i]; 1700 } else { 1701 chan->cd = &pl08x->pd->memcpy_channel; 1702 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1703 if (!chan->name) { 1704 kfree(chan); 1705 return -ENOMEM; 1706 } 1707 } 1708 if (chan->cd->circular_buffer) { 1709 dev_err(&pl08x->adev->dev, 1710 "channel %s: circular buffers not supported\n", 1711 chan->name); 1712 kfree(chan); 1713 continue; 1714 } 1715 dev_info(&pl08x->adev->dev, 1716 "initialize virtual channel \"%s\"\n", 1717 chan->name); 1718 1719 chan->chan.device = dmadev; 1720 chan->chan.cookie = 0; 1721 chan->lc = 0; 1722 1723 spin_lock_init(&chan->lock); 1724 INIT_LIST_HEAD(&chan->pend_list); 1725 tasklet_init(&chan->tasklet, pl08x_tasklet, 1726 (unsigned long) chan); 1727 1728 list_add_tail(&chan->chan.device_node, &dmadev->channels); 1729 } 1730 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1731 i, slave ? "slave" : "memcpy"); 1732 return i; 1733 } 1734 1735 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1736 { 1737 struct pl08x_dma_chan *chan = NULL; 1738 struct pl08x_dma_chan *next; 1739 1740 list_for_each_entry_safe(chan, 1741 next, &dmadev->channels, chan.device_node) { 1742 list_del(&chan->chan.device_node); 1743 kfree(chan); 1744 } 1745 } 1746 1747 #ifdef CONFIG_DEBUG_FS 1748 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1749 { 1750 switch (state) { 1751 case PL08X_CHAN_IDLE: 1752 return "idle"; 1753 case PL08X_CHAN_RUNNING: 1754 return "running"; 1755 case PL08X_CHAN_PAUSED: 1756 return "paused"; 1757 case PL08X_CHAN_WAITING: 1758 return "waiting"; 1759 default: 1760 break; 1761 } 1762 return "UNKNOWN STATE"; 1763 } 1764 1765 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1766 { 1767 struct pl08x_driver_data *pl08x = s->private; 1768 struct pl08x_dma_chan *chan; 1769 struct pl08x_phy_chan *ch; 1770 unsigned long flags; 1771 int i; 1772 1773 seq_printf(s, "PL08x physical channels:\n"); 1774 seq_printf(s, "CHANNEL:\tUSER:\n"); 1775 seq_printf(s, "--------\t-----\n"); 1776 for (i = 0; i < pl08x->vd->channels; i++) { 1777 struct pl08x_dma_chan *virt_chan; 1778 1779 ch = &pl08x->phy_chans[i]; 1780 1781 spin_lock_irqsave(&ch->lock, flags); 1782 virt_chan = ch->serving; 1783 1784 seq_printf(s, "%d\t\t%s\n", 1785 ch->id, virt_chan ? 
virt_chan->name : "(none)"); 1786 1787 spin_unlock_irqrestore(&ch->lock, flags); 1788 } 1789 1790 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1791 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1792 seq_printf(s, "--------\t------\n"); 1793 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1794 seq_printf(s, "%s\t\t%s\n", chan->name, 1795 pl08x_state_str(chan->state)); 1796 } 1797 1798 seq_printf(s, "\nPL08x virtual slave channels:\n"); 1799 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1800 seq_printf(s, "--------\t------\n"); 1801 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1802 seq_printf(s, "%s\t\t%s\n", chan->name, 1803 pl08x_state_str(chan->state)); 1804 } 1805 1806 return 0; 1807 } 1808 1809 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 1810 { 1811 return single_open(file, pl08x_debugfs_show, inode->i_private); 1812 } 1813 1814 static const struct file_operations pl08x_debugfs_operations = { 1815 .open = pl08x_debugfs_open, 1816 .read = seq_read, 1817 .llseek = seq_lseek, 1818 .release = single_release, 1819 }; 1820 1821 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1822 { 1823 /* Expose a simple debugfs interface to view all clocks */ 1824 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1825 NULL, pl08x, 1826 &pl08x_debugfs_operations); 1827 } 1828 1829 #else 1830 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1831 { 1832 } 1833 #endif 1834 1835 static int pl08x_probe(struct amba_device *adev, struct amba_id *id) 1836 { 1837 struct pl08x_driver_data *pl08x; 1838 const struct vendor_data *vd = id->data; 1839 int ret = 0; 1840 int i; 1841 1842 ret = amba_request_regions(adev, NULL); 1843 if (ret) 1844 return ret; 1845 1846 /* Create the driver state holder */ 1847 pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); 1848 if (!pl08x) { 1849 ret = -ENOMEM; 1850 goto out_no_pl08x; 1851 } 1852 1853 /* Initialize memcpy engine */ 1854 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1855 pl08x->memcpy.dev = &adev->dev; 1856 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1857 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 1858 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 1859 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1860 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 1861 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 1862 pl08x->memcpy.device_control = pl08x_control; 1863 1864 /* Initialize slave engine */ 1865 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 1866 pl08x->slave.dev = &adev->dev; 1867 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1868 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 1869 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1870 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 1871 pl08x->slave.device_issue_pending = pl08x_issue_pending; 1872 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 1873 pl08x->slave.device_control = pl08x_control; 1874 1875 /* Get the platform data */ 1876 pl08x->pd = dev_get_platdata(&adev->dev); 1877 if (!pl08x->pd) { 1878 dev_err(&adev->dev, "no platform data supplied\n"); 1879 goto out_no_platdata; 1880 } 1881 1882 /* Assign useful pointers to the driver state */ 1883 pl08x->adev = adev; 1884 pl08x->vd = vd; 1885 1886 /* By default, AHB1 only. 
If dualmaster, from platform */ 1887 pl08x->lli_buses = PL08X_AHB1; 1888 pl08x->mem_buses = PL08X_AHB1; 1889 if (pl08x->vd->dualmaster) { 1890 pl08x->lli_buses = pl08x->pd->lli_buses; 1891 pl08x->mem_buses = pl08x->pd->mem_buses; 1892 } 1893 1894 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1895 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1896 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1897 if (!pl08x->pool) { 1898 ret = -ENOMEM; 1899 goto out_no_lli_pool; 1900 } 1901 1902 spin_lock_init(&pl08x->lock); 1903 1904 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 1905 if (!pl08x->base) { 1906 ret = -ENOMEM; 1907 goto out_no_ioremap; 1908 } 1909 1910 /* Turn on the PL08x */ 1911 pl08x_ensure_on(pl08x); 1912 1913 /* Attach the interrupt handler */ 1914 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1915 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 1916 1917 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 1918 DRIVER_NAME, pl08x); 1919 if (ret) { 1920 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 1921 __func__, adev->irq[0]); 1922 goto out_no_irq; 1923 } 1924 1925 /* Initialize physical channels */ 1926 pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 1927 GFP_KERNEL); 1928 if (!pl08x->phy_chans) { 1929 dev_err(&adev->dev, "%s failed to allocate " 1930 "physical channel holders\n", 1931 __func__); 1932 goto out_no_phychans; 1933 } 1934 1935 for (i = 0; i < vd->channels; i++) { 1936 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 1937 1938 ch->id = i; 1939 ch->base = pl08x->base + PL080_Cx_BASE(i); 1940 spin_lock_init(&ch->lock); 1941 ch->serving = NULL; 1942 ch->signal = -1; 1943 dev_info(&adev->dev, 1944 "physical channel %d is %s\n", i, 1945 pl08x_phy_channel_busy(ch) ? 
"BUSY" : "FREE"); 1946 } 1947 1948 /* Register as many memcpy channels as there are physical channels */ 1949 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 1950 pl08x->vd->channels, false); 1951 if (ret <= 0) { 1952 dev_warn(&pl08x->adev->dev, 1953 "%s failed to enumerate memcpy channels - %d\n", 1954 __func__, ret); 1955 goto out_no_memcpy; 1956 } 1957 pl08x->memcpy.chancnt = ret; 1958 1959 /* Register slave channels */ 1960 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1961 pl08x->pd->num_slave_channels, 1962 true); 1963 if (ret <= 0) { 1964 dev_warn(&pl08x->adev->dev, 1965 "%s failed to enumerate slave channels - %d\n", 1966 __func__, ret); 1967 goto out_no_slave; 1968 } 1969 pl08x->slave.chancnt = ret; 1970 1971 ret = dma_async_device_register(&pl08x->memcpy); 1972 if (ret) { 1973 dev_warn(&pl08x->adev->dev, 1974 "%s failed to register memcpy as an async device - %d\n", 1975 __func__, ret); 1976 goto out_no_memcpy_reg; 1977 } 1978 1979 ret = dma_async_device_register(&pl08x->slave); 1980 if (ret) { 1981 dev_warn(&pl08x->adev->dev, 1982 "%s failed to register slave as an async device - %d\n", 1983 __func__, ret); 1984 goto out_no_slave_reg; 1985 } 1986 1987 amba_set_drvdata(adev, pl08x); 1988 init_pl08x_debugfs(pl08x); 1989 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 1990 amba_part(adev), amba_rev(adev), 1991 (unsigned long long)adev->res.start, adev->irq[0]); 1992 return 0; 1993 1994 out_no_slave_reg: 1995 dma_async_device_unregister(&pl08x->memcpy); 1996 out_no_memcpy_reg: 1997 pl08x_free_virtual_channels(&pl08x->slave); 1998 out_no_slave: 1999 pl08x_free_virtual_channels(&pl08x->memcpy); 2000 out_no_memcpy: 2001 kfree(pl08x->phy_chans); 2002 out_no_phychans: 2003 free_irq(adev->irq[0], pl08x); 2004 out_no_irq: 2005 iounmap(pl08x->base); 2006 out_no_ioremap: 2007 dma_pool_destroy(pl08x->pool); 2008 out_no_lli_pool: 2009 out_no_platdata: 2010 kfree(pl08x); 2011 out_no_pl08x: 2012 amba_release_regions(adev); 2013 return ret; 2014 } 2015 2016 /* PL080 has 8 channels and the PL080 have just 2 */ 2017 static struct vendor_data vendor_pl080 = { 2018 .channels = 8, 2019 .dualmaster = true, 2020 }; 2021 2022 static struct vendor_data vendor_pl081 = { 2023 .channels = 2, 2024 .dualmaster = false, 2025 }; 2026 2027 static struct amba_id pl08x_ids[] = { 2028 /* PL080 */ 2029 { 2030 .id = 0x00041080, 2031 .mask = 0x000fffff, 2032 .data = &vendor_pl080, 2033 }, 2034 /* PL081 */ 2035 { 2036 .id = 0x00041081, 2037 .mask = 0x000fffff, 2038 .data = &vendor_pl081, 2039 }, 2040 /* Nomadik 8815 PL080 variant */ 2041 { 2042 .id = 0x00280880, 2043 .mask = 0x00ffffff, 2044 .data = &vendor_pl080, 2045 }, 2046 { 0, 0 }, 2047 }; 2048 2049 static struct amba_driver pl08x_amba_driver = { 2050 .drv.name = DRIVER_NAME, 2051 .id_table = pl08x_ids, 2052 .probe = pl08x_probe, 2053 }; 2054 2055 static int __init pl08x_init(void) 2056 { 2057 int retval; 2058 retval = amba_driver_register(&pl08x_amba_driver); 2059 if (retval) 2060 printk(KERN_WARNING DRIVER_NAME 2061 "failed to register as an AMBA device (%d)\n", 2062 retval); 2063 return retval; 2064 } 2065 subsys_initcall(pl08x_init); 2066