/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
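/*
 * For illustration only (not from the TRM text above): applying the FIFO
 * rule to a hypothetical peripheral with a 32-word deep FIFO would give a
 * source burst size of 16 words and a destination burst size of 32 words
 * for a memory-to-peripheral transfer.
 */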
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

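/*
 * Illustrative note (not part of the TRM text): the code below chains LLIs
 * by writing the bus address of the following struct pl08x_lli into @lli,
 * optionally OR:ed with PL080_LLI_LM_AHB2 in bit[0] so that LLI fetches go
 * over AHB2; an @lli value of 0 terminates the chain.
 */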
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			list_for_each_entry(dsg, &txdi->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	pm_runtime_get_sync(&pl08x->adev->dev);
	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	pm_runtime_put(&pl08x->adev->dev);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
	size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
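/*
 * For illustration only: pl08x_cctl_bits(cctl, 4, 1, n) selects a 32-bit
 * source width and an 8-bit destination width and writes n verbatim into
 * the transfer size field.  Note that the transfer size counts source-width
 * transfers, so the resulting byte count is n * srcwidth, not
 * n * min(buswidths).
 */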
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus will be
 * the victim in case src & dest are not similarly aligned, i.e. if, after
 * aligning the master's address to the width requirements of the transfer
 * (by sending a few bytes byte by byte), the slave is still not aligned,
 * then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the client driver,
		 *   with the memory address and zero length.  We pass this
		 *   to the controller and after the transfer it will receive
		 *   the last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, since the DMA controller doesn't know when an LLI's
		 *   transfer is over, it can't load the next LLI.  So in this
		 *   case there has to be an assumption that only one LLI is
		 *   supported.  Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	/* Free the LLI */
	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan) {
		ch = plchan->phychan;
		goto got_channel;
	}

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave && pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	plchan->phychan_hold++;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
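/*
 * For illustration only: the lookup above picks the largest table entry
 * that does not exceed the request, so a maxburst of 16 maps to
 * PL080_BSIZE_16, a maxburst of 5 rounds down to PL080_BSIZE_4, and a
 * maxburst of 0 or 1 falls through to PL080_BSIZE_1.
 */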
static int dma_set_runtime_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_MEM_TO_DEV) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	plchan->device_fc = config->device_fc;

	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
		plchan->src_addr = config->src_addr;
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses,
					 pl08x->mem_buses);
	} else {
		plchan->dst_addr = config->dst_addr;
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses,
					 plchan->cd->periph_buses);
	}

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}
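/*
 * A client driver normally reaches the function above through the generic
 * dmaengine_slave_config() helper.  A minimal sketch, with purely
 * illustrative values and "fifo_addr" standing in for the peripheral's
 * FIFO bus address:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */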
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_NONE;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	int ret, tmp;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);
	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		txd->cctl = plchan->dst_cctl;
		slave_addr = plchan->dst_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->cctl = plchan->src_cctl;
		slave_addr = plchan->src_addr;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	if (plchan->device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
			plchan->phychan_hold = 0;
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
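/*
 * A peripheral driver would typically use the filter above through the
 * generic channel request API.  A minimal sketch, where "uart0_tx" stands
 * in for whatever bus_id the platform data assigned to the channel:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */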
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off.  That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		dma_cookie_complete(&txd->tx);
	}

	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);
			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	u32 cctl = pl08x_cctl(chan->cd->cctl);

	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->src_addr = chan->cd->addr;
	chan->dst_addr = chan->cd->addr;
	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
	struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		dma_cookie_init(&chan->chan);

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");
" LOCKED" : ""); 1787 1788 spin_unlock_irqrestore(&ch->lock, flags); 1789 } 1790 1791 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1792 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1793 seq_printf(s, "--------\t------\n"); 1794 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1795 seq_printf(s, "%s\t\t%s\n", chan->name, 1796 pl08x_state_str(chan->state)); 1797 } 1798 1799 seq_printf(s, "\nPL08x virtual slave channels:\n"); 1800 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1801 seq_printf(s, "--------\t------\n"); 1802 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1803 seq_printf(s, "%s\t\t%s\n", chan->name, 1804 pl08x_state_str(chan->state)); 1805 } 1806 1807 return 0; 1808 } 1809 1810 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 1811 { 1812 return single_open(file, pl08x_debugfs_show, inode->i_private); 1813 } 1814 1815 static const struct file_operations pl08x_debugfs_operations = { 1816 .open = pl08x_debugfs_open, 1817 .read = seq_read, 1818 .llseek = seq_lseek, 1819 .release = single_release, 1820 }; 1821 1822 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1823 { 1824 /* Expose a simple debugfs interface to view all clocks */ 1825 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 1826 S_IFREG | S_IRUGO, NULL, pl08x, 1827 &pl08x_debugfs_operations); 1828 } 1829 1830 #else 1831 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1832 { 1833 } 1834 #endif 1835 1836 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 1837 { 1838 struct pl08x_driver_data *pl08x; 1839 const struct vendor_data *vd = id->data; 1840 int ret = 0; 1841 int i; 1842 1843 ret = amba_request_regions(adev, NULL); 1844 if (ret) 1845 return ret; 1846 1847 /* Create the driver state holder */ 1848 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 1849 if (!pl08x) { 1850 ret = -ENOMEM; 1851 goto out_no_pl08x; 1852 } 1853 1854 pm_runtime_set_active(&adev->dev); 1855 pm_runtime_enable(&adev->dev); 1856 1857 /* Initialize memcpy engine */ 1858 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1859 pl08x->memcpy.dev = &adev->dev; 1860 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1861 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 1862 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 1863 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1864 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 1865 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 1866 pl08x->memcpy.device_control = pl08x_control; 1867 1868 /* Initialize slave engine */ 1869 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 1870 pl08x->slave.dev = &adev->dev; 1871 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1872 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 1873 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1874 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 1875 pl08x->slave.device_issue_pending = pl08x_issue_pending; 1876 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 1877 pl08x->slave.device_control = pl08x_control; 1878 1879 /* Get the platform data */ 1880 pl08x->pd = dev_get_platdata(&adev->dev); 1881 if (!pl08x->pd) { 1882 dev_err(&adev->dev, "no platform data supplied\n"); 1883 goto out_no_platdata; 1884 } 1885 1886 /* Assign useful pointers to the driver state */ 1887 pl08x->adev = adev; 1888 pl08x->vd = vd; 1889 1890 /* By default, AHB1 only. 
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->signal = -1;

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only.  Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
"BUSY" : "FREE"); 1964 } 1965 1966 /* Register as many memcpy channels as there are physical channels */ 1967 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 1968 pl08x->vd->channels, false); 1969 if (ret <= 0) { 1970 dev_warn(&pl08x->adev->dev, 1971 "%s failed to enumerate memcpy channels - %d\n", 1972 __func__, ret); 1973 goto out_no_memcpy; 1974 } 1975 pl08x->memcpy.chancnt = ret; 1976 1977 /* Register slave channels */ 1978 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1979 pl08x->pd->num_slave_channels, true); 1980 if (ret <= 0) { 1981 dev_warn(&pl08x->adev->dev, 1982 "%s failed to enumerate slave channels - %d\n", 1983 __func__, ret); 1984 goto out_no_slave; 1985 } 1986 pl08x->slave.chancnt = ret; 1987 1988 ret = dma_async_device_register(&pl08x->memcpy); 1989 if (ret) { 1990 dev_warn(&pl08x->adev->dev, 1991 "%s failed to register memcpy as an async device - %d\n", 1992 __func__, ret); 1993 goto out_no_memcpy_reg; 1994 } 1995 1996 ret = dma_async_device_register(&pl08x->slave); 1997 if (ret) { 1998 dev_warn(&pl08x->adev->dev, 1999 "%s failed to register slave as an async device - %d\n", 2000 __func__, ret); 2001 goto out_no_slave_reg; 2002 } 2003 2004 amba_set_drvdata(adev, pl08x); 2005 init_pl08x_debugfs(pl08x); 2006 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 2007 amba_part(adev), amba_rev(adev), 2008 (unsigned long long)adev->res.start, adev->irq[0]); 2009 2010 pm_runtime_put(&adev->dev); 2011 return 0; 2012 2013 out_no_slave_reg: 2014 dma_async_device_unregister(&pl08x->memcpy); 2015 out_no_memcpy_reg: 2016 pl08x_free_virtual_channels(&pl08x->slave); 2017 out_no_slave: 2018 pl08x_free_virtual_channels(&pl08x->memcpy); 2019 out_no_memcpy: 2020 kfree(pl08x->phy_chans); 2021 out_no_phychans: 2022 free_irq(adev->irq[0], pl08x); 2023 out_no_irq: 2024 iounmap(pl08x->base); 2025 out_no_ioremap: 2026 dma_pool_destroy(pl08x->pool); 2027 out_no_lli_pool: 2028 out_no_platdata: 2029 pm_runtime_put(&adev->dev); 2030 pm_runtime_disable(&adev->dev); 2031 2032 kfree(pl08x); 2033 out_no_pl08x: 2034 amba_release_regions(adev); 2035 return ret; 2036 } 2037 2038 /* PL080 has 8 channels and the PL080 have just 2 */ 2039 static struct vendor_data vendor_pl080 = { 2040 .channels = 8, 2041 .dualmaster = true, 2042 }; 2043 2044 static struct vendor_data vendor_nomadik = { 2045 .channels = 8, 2046 .dualmaster = true, 2047 .nomadik = true, 2048 }; 2049 2050 static struct vendor_data vendor_pl081 = { 2051 .channels = 2, 2052 .dualmaster = false, 2053 }; 2054 2055 static struct amba_id pl08x_ids[] = { 2056 /* PL080 */ 2057 { 2058 .id = 0x00041080, 2059 .mask = 0x000fffff, 2060 .data = &vendor_pl080, 2061 }, 2062 /* PL081 */ 2063 { 2064 .id = 0x00041081, 2065 .mask = 0x000fffff, 2066 .data = &vendor_pl081, 2067 }, 2068 /* Nomadik 8815 PL080 variant */ 2069 { 2070 .id = 0x00280080, 2071 .mask = 0x00ffffff, 2072 .data = &vendor_nomadik, 2073 }, 2074 { 0, 0 }, 2075 }; 2076 2077 MODULE_DEVICE_TABLE(amba, pl08x_ids); 2078 2079 static struct amba_driver pl08x_amba_driver = { 2080 .drv.name = DRIVER_NAME, 2081 .id_table = pl08x_ids, 2082 .probe = pl08x_probe, 2083 }; 2084 2085 static int __init pl08x_init(void) 2086 { 2087 int retval; 2088 retval = amba_driver_register(&pl08x_amba_driver); 2089 if (retval) 2090 printk(KERN_WARNING DRIVER_NAME 2091 "failed to register as an AMBA device (%d)\n", 2092 retval); 2093 return retval; 2094 } 2095 subsys_initcall(pl08x_init); 2096