1 /* 2 * Copyright (c) 2006 ARM Ltd. 3 * Copyright (c) 2010 ST-Ericsson SA 4 * 5 * Author: Peter Pearse <peter.pearse@arm.com> 6 * Author: Linus Walleij <linus.walleij@stericsson.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the Free 10 * Software Foundation; either version 2 of the License, or (at your option) 11 * any later version. 12 * 13 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * more details. 17 * 18 * The full GNU General Public License is in this distribution in the file 19 * called COPYING. 20 * 21 * Documentation: ARM DDI 0196G == PL080 22 * Documentation: ARM DDI 0218E == PL081 23 * Documentation: S3C6410 User's Manual == PL080S 24 * 25 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any 26 * channel. 27 * 28 * The PL080 has 8 channels available for simultaneous use, and the PL081 29 * has only two channels. So on these DMA controllers the number of channels 30 * and the number of incoming DMA signals are two totally different things. 31 * It is usually not possible to theoretically handle all physical signals, 32 * so a multiplexing scheme with possible denial of use is necessary. 33 * 34 * The PL080 has a dual bus master, PL081 has a single master. 35 * 36 * PL080S is a version modified by Samsung and used in S3C64xx SoCs. 37 * It differs in following aspects: 38 * - CH_CONFIG register at different offset, 39 * - separate CH_CONTROL2 register for transfer size, 40 * - bigger maximum transfer size, 41 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word, 42 * - no support for peripheral flow control. 43 * 44 * Memory to peripheral transfer may be visualized as 45 * Get data from memory to DMAC 46 * Until no data left 47 * On burst request from peripheral 48 * Destination burst from DMAC to peripheral 49 * Clear burst request 50 * Raise terminal count interrupt 51 * 52 * For peripherals with a FIFO: 53 * Source burst size == half the depth of the peripheral FIFO 54 * Destination burst size == the depth of the peripheral FIFO 55 * 56 * (Bursts are irrelevant for mem to mem transfers - there are no burst 57 * signals, the DMA controller will simply facilitate its AHB master.) 58 * 59 * ASSUMES default (little) endianness for DMA transfers 60 * 61 * The PL08x has two flow control settings: 62 * - DMAC flow control: the transfer size defines the number of transfers 63 * which occur for the current LLI entry, and the DMAC raises TC at the 64 * end of every LLI entry. Observed behaviour shows the DMAC listening 65 * to both the BREQ and SREQ signals (contrary to documented), 66 * transferring data if either is active. The LBREQ and LSREQ signals 67 * are ignored. 68 * 69 * - Peripheral flow control: the transfer size is ignored (and should be 70 * zero). The data is transferred from the current LLI entry, until 71 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 72 * will then move to the next LLI entry. Unsupported by PL080S. 
73 */ 74 #include <linux/amba/bus.h> 75 #include <linux/amba/pl08x.h> 76 #include <linux/debugfs.h> 77 #include <linux/delay.h> 78 #include <linux/device.h> 79 #include <linux/dmaengine.h> 80 #include <linux/dmapool.h> 81 #include <linux/dma-mapping.h> 82 #include <linux/export.h> 83 #include <linux/init.h> 84 #include <linux/interrupt.h> 85 #include <linux/module.h> 86 #include <linux/of.h> 87 #include <linux/of_dma.h> 88 #include <linux/pm_runtime.h> 89 #include <linux/seq_file.h> 90 #include <linux/slab.h> 91 #include <linux/amba/pl080.h> 92 93 #include "dmaengine.h" 94 #include "virt-dma.h" 95 96 #define DRIVER_NAME "pl08xdmac" 97 98 #define PL80X_DMA_BUSWIDTHS \ 99 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 100 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 101 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 102 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) 103 104 static struct amba_driver pl08x_amba_driver; 105 struct pl08x_driver_data; 106 107 /** 108 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 109 * @channels: the number of channels available in this variant 110 * @dualmaster: whether this version supports dual AHB masters or not. 111 * @nomadik: whether the channels have Nomadik security extension bits 112 * that need to be checked for permission before use and some registers are 113 * missing 114 * @pl080s: whether this version is a PL080S, which has separate register and 115 * LLI word for transfer size. 116 */ 117 struct vendor_data { 118 u8 config_offset; 119 u8 channels; 120 bool dualmaster; 121 bool nomadik; 122 bool pl080s; 123 u32 max_transfer_size; 124 }; 125 126 /** 127 * struct pl08x_bus_data - information of source or destination 128 * busses for a transfer 129 * @addr: current address 130 * @maxwidth: the maximum width of a transfer on this bus 131 * @buswidth: the width of this bus in bytes: 1, 2 or 4 132 */ 133 struct pl08x_bus_data { 134 dma_addr_t addr; 135 u8 maxwidth; 136 u8 buswidth; 137 }; 138 139 #define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth) 140 141 /** 142 * struct pl08x_phy_chan - holder for the physical channels 143 * @id: physical index to this channel 144 * @lock: a lock to use when altering an instance of this struct 145 * @serving: the virtual channel currently being served by this physical 146 * channel 147 * @locked: channel unavailable for the system, e.g. dedicated to secure 148 * world 149 */ 150 struct pl08x_phy_chan { 151 unsigned int id; 152 void __iomem *base; 153 void __iomem *reg_config; 154 spinlock_t lock; 155 struct pl08x_dma_chan *serving; 156 bool locked; 157 }; 158 159 /** 160 * struct pl08x_sg - structure containing data per sg 161 * @src_addr: src address of sg 162 * @dst_addr: dst address of sg 163 * @len: transfer len in bytes 164 * @node: node for txd's dsg_list 165 */ 166 struct pl08x_sg { 167 dma_addr_t src_addr; 168 dma_addr_t dst_addr; 169 size_t len; 170 struct list_head node; 171 }; 172 173 /** 174 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 175 * @vd: virtual DMA descriptor 176 * @dsg_list: list of children sg's 177 * @llis_bus: DMA memory address (physical) start for the LLIs 178 * @llis_va: virtual memory address start for the LLIs 179 * @cctl: control reg values for current txd 180 * @ccfg: config reg values for current txd 181 * @done: this marks completed descriptors, which should not have their 182 * mux released. 
183 * @cyclic: indicate cyclic transfers 184 */ 185 struct pl08x_txd { 186 struct virt_dma_desc vd; 187 struct list_head dsg_list; 188 dma_addr_t llis_bus; 189 u32 *llis_va; 190 /* Default cctl value for LLIs */ 191 u32 cctl; 192 /* 193 * Settings to be put into the physical channel when we 194 * trigger this txd. Other registers are in llis_va[0]. 195 */ 196 u32 ccfg; 197 bool done; 198 bool cyclic; 199 }; 200 201 /** 202 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel 203 * states 204 * @PL08X_CHAN_IDLE: the channel is idle 205 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 206 * channel and is running a transfer on it 207 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport 208 * channel, but the transfer is currently paused 209 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport 210 * channel to become available (only pertains to memcpy channels) 211 */ 212 enum pl08x_dma_chan_state { 213 PL08X_CHAN_IDLE, 214 PL08X_CHAN_RUNNING, 215 PL08X_CHAN_PAUSED, 216 PL08X_CHAN_WAITING, 217 }; 218 219 /** 220 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 221 * @vc: wrappped virtual channel 222 * @phychan: the physical channel utilized by this channel, if there is one 223 * @name: name of channel 224 * @cd: channel platform data 225 * @runtime_addr: address for RX/TX according to the runtime config 226 * @at: active transaction on this channel 227 * @lock: a lock for this channel data 228 * @host: a pointer to the host (internal use) 229 * @state: whether the channel is idle, paused, running etc 230 * @slave: whether this channel is a device (slave) or for memcpy 231 * @signal: the physical DMA request signal which this channel is using 232 * @mux_use: count of descriptors using this DMA request signal setting 233 */ 234 struct pl08x_dma_chan { 235 struct virt_dma_chan vc; 236 struct pl08x_phy_chan *phychan; 237 const char *name; 238 const struct pl08x_channel_data *cd; 239 struct dma_slave_config cfg; 240 struct pl08x_txd *at; 241 struct pl08x_driver_data *host; 242 enum pl08x_dma_chan_state state; 243 bool slave; 244 int signal; 245 unsigned mux_use; 246 }; 247 248 /** 249 * struct pl08x_driver_data - the local state holder for the PL08x 250 * @slave: slave engine for this instance 251 * @memcpy: memcpy engine for this instance 252 * @base: virtual memory base (remapped) for the PL08x 253 * @adev: the corresponding AMBA (PrimeCell) bus entry 254 * @vd: vendor data for this PL08x variant 255 * @pd: platform data passed in from the platform/machine 256 * @phy_chans: array of data for the physical channels 257 * @pool: a pool for the LLI descriptors 258 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 259 * fetches 260 * @mem_buses: set to indicate memory transfers on AHB2. 261 * @lock: a spinlock for this struct 262 */ 263 struct pl08x_driver_data { 264 struct dma_device slave; 265 struct dma_device memcpy; 266 void __iomem *base; 267 struct amba_device *adev; 268 const struct vendor_data *vd; 269 struct pl08x_platform_data *pd; 270 struct pl08x_phy_chan *phy_chans; 271 struct dma_pool *pool; 272 u8 lli_buses; 273 u8 mem_buses; 274 u8 lli_words; 275 }; 276 277 /* 278 * PL08X specific defines 279 */ 280 281 /* The order of words in an LLI. */ 282 #define PL080_LLI_SRC 0 283 #define PL080_LLI_DST 1 284 #define PL080_LLI_LLI 2 285 #define PL080_LLI_CCTL 3 286 #define PL080S_LLI_CCTL2 4 287 288 /* Total words in an LLI. 
*/ 289 #define PL080_LLI_WORDS 4 290 #define PL080S_LLI_WORDS 8 291 292 /* 293 * Number of LLIs in each LLI buffer allocated for one transfer 294 * (maximum times we call dma_pool_alloc on this pool without freeing) 295 */ 296 #define MAX_NUM_TSFR_LLIS 512 297 #define PL08X_ALIGN 8 298 299 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 300 { 301 return container_of(chan, struct pl08x_dma_chan, vc.chan); 302 } 303 304 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) 305 { 306 return container_of(tx, struct pl08x_txd, vd.tx); 307 } 308 309 /* 310 * Mux handling. 311 * 312 * This gives us the DMA request input to the PL08x primecell which the 313 * peripheral described by the channel data will be routed to, possibly 314 * via a board/SoC specific external MUX. One important point to note 315 * here is that this does not depend on the physical channel. 316 */ 317 static int pl08x_request_mux(struct pl08x_dma_chan *plchan) 318 { 319 const struct pl08x_platform_data *pd = plchan->host->pd; 320 int ret; 321 322 if (plchan->mux_use++ == 0 && pd->get_xfer_signal) { 323 ret = pd->get_xfer_signal(plchan->cd); 324 if (ret < 0) { 325 plchan->mux_use = 0; 326 return ret; 327 } 328 329 plchan->signal = ret; 330 } 331 return 0; 332 } 333 334 static void pl08x_release_mux(struct pl08x_dma_chan *plchan) 335 { 336 const struct pl08x_platform_data *pd = plchan->host->pd; 337 338 if (plchan->signal >= 0) { 339 WARN_ON(plchan->mux_use == 0); 340 341 if (--plchan->mux_use == 0 && pd->put_xfer_signal) { 342 pd->put_xfer_signal(plchan->cd, plchan->signal); 343 plchan->signal = -1; 344 } 345 } 346 } 347 348 /* 349 * Physical channel handling 350 */ 351 352 /* Whether a certain channel is busy or not */ 353 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 354 { 355 unsigned int val; 356 357 val = readl(ch->reg_config); 358 return val & PL080_CONFIG_ACTIVE; 359 } 360 361 static void pl08x_write_lli(struct pl08x_driver_data *pl08x, 362 struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) 363 { 364 if (pl08x->vd->pl080s) 365 dev_vdbg(&pl08x->adev->dev, 366 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 367 "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n", 368 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 369 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], 370 lli[PL080S_LLI_CCTL2], ccfg); 371 else 372 dev_vdbg(&pl08x->adev->dev, 373 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " 374 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", 375 phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], 376 lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); 377 378 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); 379 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); 380 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); 381 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); 382 383 if (pl08x->vd->pl080s) 384 writel_relaxed(lli[PL080S_LLI_CCTL2], 385 phychan->base + PL080S_CH_CONTROL2); 386 387 writel(ccfg, phychan->reg_config); 388 } 389 390 /* 391 * Set the initial DMA register values i.e. those for the first LLI 392 * The next LLI pointer and the configuration interrupt bit have 393 * been set when the LLIs were constructed. Poke them into the hardware 394 * and start the transfer. 
395 */ 396 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) 397 { 398 struct pl08x_driver_data *pl08x = plchan->host; 399 struct pl08x_phy_chan *phychan = plchan->phychan; 400 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 401 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 402 u32 val; 403 404 list_del(&txd->vd.node); 405 406 plchan->at = txd; 407 408 /* Wait for channel inactive */ 409 while (pl08x_phy_channel_busy(phychan)) 410 cpu_relax(); 411 412 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); 413 414 /* Enable the DMA channel */ 415 /* Do not access config register until channel shows as disabled */ 416 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) 417 cpu_relax(); 418 419 /* Do not access config register until channel shows as inactive */ 420 val = readl(phychan->reg_config); 421 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 422 val = readl(phychan->reg_config); 423 424 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); 425 } 426 427 /* 428 * Pause the channel by setting the HALT bit. 429 * 430 * For M->P transfers, pause the DMAC first and then stop the peripheral - 431 * the FIFO can only drain if the peripheral is still requesting data. 432 * (note: this can still timeout if the DMAC FIFO never drains of data.) 433 * 434 * For P->M transfers, disable the peripheral first to stop it filling 435 * the DMAC FIFO, and then pause the DMAC. 436 */ 437 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 438 { 439 u32 val; 440 int timeout; 441 442 /* Set the HALT bit and wait for the FIFO to drain */ 443 val = readl(ch->reg_config); 444 val |= PL080_CONFIG_HALT; 445 writel(val, ch->reg_config); 446 447 /* Wait for channel inactive */ 448 for (timeout = 1000; timeout; timeout--) { 449 if (!pl08x_phy_channel_busy(ch)) 450 break; 451 udelay(1); 452 } 453 if (pl08x_phy_channel_busy(ch)) 454 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 455 } 456 457 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 458 { 459 u32 val; 460 461 /* Clear the HALT bit */ 462 val = readl(ch->reg_config); 463 val &= ~PL080_CONFIG_HALT; 464 writel(val, ch->reg_config); 465 } 466 467 /* 468 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 469 * clears any pending interrupt status. This should not be used for 470 * an on-going transfer, but as a method of shutting down a channel 471 * (eg, when it's no longer used) or terminating a transfer. 
472 */ 473 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 474 struct pl08x_phy_chan *ch) 475 { 476 u32 val = readl(ch->reg_config); 477 478 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 479 PL080_CONFIG_TC_IRQ_MASK); 480 481 writel(val, ch->reg_config); 482 483 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); 484 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); 485 } 486 487 static inline u32 get_bytes_in_cctl(u32 cctl) 488 { 489 /* The source width defines the number of bytes */ 490 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 491 492 cctl &= PL080_CONTROL_SWIDTH_MASK; 493 494 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 495 case PL080_WIDTH_8BIT: 496 break; 497 case PL080_WIDTH_16BIT: 498 bytes *= 2; 499 break; 500 case PL080_WIDTH_32BIT: 501 bytes *= 4; 502 break; 503 } 504 return bytes; 505 } 506 507 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) 508 { 509 /* The source width defines the number of bytes */ 510 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; 511 512 cctl &= PL080_CONTROL_SWIDTH_MASK; 513 514 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 515 case PL080_WIDTH_8BIT: 516 break; 517 case PL080_WIDTH_16BIT: 518 bytes *= 2; 519 break; 520 case PL080_WIDTH_32BIT: 521 bytes *= 4; 522 break; 523 } 524 return bytes; 525 } 526 527 /* The channel should be paused when calling this */ 528 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 529 { 530 struct pl08x_driver_data *pl08x = plchan->host; 531 const u32 *llis_va, *llis_va_limit; 532 struct pl08x_phy_chan *ch; 533 dma_addr_t llis_bus; 534 struct pl08x_txd *txd; 535 u32 llis_max_words; 536 size_t bytes; 537 u32 clli; 538 539 ch = plchan->phychan; 540 txd = plchan->at; 541 542 if (!ch || !txd) 543 return 0; 544 545 /* 546 * Follow the LLIs to get the number of remaining 547 * bytes in the currently active transaction. 548 */ 549 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; 550 551 /* First get the remaining bytes in the active transfer */ 552 if (pl08x->vd->pl080s) 553 bytes = get_bytes_in_cctl_pl080s( 554 readl(ch->base + PL080_CH_CONTROL), 555 readl(ch->base + PL080S_CH_CONTROL2)); 556 else 557 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 558 559 if (!clli) 560 return bytes; 561 562 llis_va = txd->llis_va; 563 llis_bus = txd->llis_bus; 564 565 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; 566 BUG_ON(clli < llis_bus || clli >= llis_bus + 567 sizeof(u32) * llis_max_words); 568 569 /* 570 * Locate the next LLI - as this is an array, 571 * it's simple maths to find. 572 */ 573 llis_va += (clli - llis_bus) / sizeof(u32); 574 575 llis_va_limit = llis_va + llis_max_words; 576 577 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { 578 if (pl08x->vd->pl080s) 579 bytes += get_bytes_in_cctl_pl080s( 580 llis_va[PL080_LLI_CCTL], 581 llis_va[PL080S_LLI_CCTL2]); 582 else 583 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); 584 585 /* 586 * A LLI pointer going backward terminates the LLI list 587 */ 588 if (llis_va[PL080_LLI_LLI] <= clli) 589 break; 590 } 591 592 return bytes; 593 } 594 595 /* 596 * Allocate a physical channel for a virtual channel 597 * 598 * Try to locate a physical channel to be used for this transfer. If all 599 * are taken return NULL and the requester will have to cope by using 600 * some fallback PIO mode or retrying later. 
601 */ 602 static struct pl08x_phy_chan * 603 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 604 struct pl08x_dma_chan *virt_chan) 605 { 606 struct pl08x_phy_chan *ch = NULL; 607 unsigned long flags; 608 int i; 609 610 for (i = 0; i < pl08x->vd->channels; i++) { 611 ch = &pl08x->phy_chans[i]; 612 613 spin_lock_irqsave(&ch->lock, flags); 614 615 if (!ch->locked && !ch->serving) { 616 ch->serving = virt_chan; 617 spin_unlock_irqrestore(&ch->lock, flags); 618 break; 619 } 620 621 spin_unlock_irqrestore(&ch->lock, flags); 622 } 623 624 if (i == pl08x->vd->channels) { 625 /* No physical channel available, cope with it */ 626 return NULL; 627 } 628 629 return ch; 630 } 631 632 /* Mark the physical channel as free. Note, this write is atomic. */ 633 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 634 struct pl08x_phy_chan *ch) 635 { 636 ch->serving = NULL; 637 } 638 639 /* 640 * Try to allocate a physical channel. When successful, assign it to 641 * this virtual channel, and initiate the next descriptor. The 642 * virtual channel lock must be held at this point. 643 */ 644 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) 645 { 646 struct pl08x_driver_data *pl08x = plchan->host; 647 struct pl08x_phy_chan *ch; 648 649 ch = pl08x_get_phy_channel(pl08x, plchan); 650 if (!ch) { 651 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 652 plchan->state = PL08X_CHAN_WAITING; 653 return; 654 } 655 656 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", 657 ch->id, plchan->name); 658 659 plchan->phychan = ch; 660 plchan->state = PL08X_CHAN_RUNNING; 661 pl08x_start_next_txd(plchan); 662 } 663 664 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, 665 struct pl08x_dma_chan *plchan) 666 { 667 struct pl08x_driver_data *pl08x = plchan->host; 668 669 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", 670 ch->id, plchan->name); 671 672 /* 673 * We do this without taking the lock; we're really only concerned 674 * about whether this pointer is NULL or not, and we're guaranteed 675 * that this will only be called when it _already_ is non-NULL. 676 */ 677 ch->serving = plchan; 678 plchan->phychan = ch; 679 plchan->state = PL08X_CHAN_RUNNING; 680 pl08x_start_next_txd(plchan); 681 } 682 683 /* 684 * Free a physical DMA channel, potentially reallocating it to another 685 * virtual channel if we have any pending. 686 */ 687 static void pl08x_phy_free(struct pl08x_dma_chan *plchan) 688 { 689 struct pl08x_driver_data *pl08x = plchan->host; 690 struct pl08x_dma_chan *p, *next; 691 692 retry: 693 next = NULL; 694 695 /* Find a waiting virtual channel for the next transfer. */ 696 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) 697 if (p->state == PL08X_CHAN_WAITING) { 698 next = p; 699 break; 700 } 701 702 if (!next) { 703 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) 704 if (p->state == PL08X_CHAN_WAITING) { 705 next = p; 706 break; 707 } 708 } 709 710 /* Ensure that the physical channel is stopped */ 711 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 712 713 if (next) { 714 bool success; 715 716 /* 717 * Eww. We know this isn't going to deadlock 718 * but lockdep probably doesn't. 
719 */ 720 spin_lock(&next->vc.lock); 721 /* Re-check the state now that we have the lock */ 722 success = next->state == PL08X_CHAN_WAITING; 723 if (success) 724 pl08x_phy_reassign_start(plchan->phychan, next); 725 spin_unlock(&next->vc.lock); 726 727 /* If the state changed, try to find another channel */ 728 if (!success) 729 goto retry; 730 } else { 731 /* No more jobs, so free up the physical channel */ 732 pl08x_put_phy_channel(pl08x, plchan->phychan); 733 } 734 735 plchan->phychan = NULL; 736 plchan->state = PL08X_CHAN_IDLE; 737 } 738 739 /* 740 * LLI handling 741 */ 742 743 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 744 { 745 switch (coded) { 746 case PL080_WIDTH_8BIT: 747 return 1; 748 case PL080_WIDTH_16BIT: 749 return 2; 750 case PL080_WIDTH_32BIT: 751 return 4; 752 default: 753 break; 754 } 755 BUG(); 756 return 0; 757 } 758 759 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 760 size_t tsize) 761 { 762 u32 retbits = cctl; 763 764 /* Remove all src, dst and transfer size bits */ 765 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 766 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 767 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 768 769 /* Then set the bits according to the parameters */ 770 switch (srcwidth) { 771 case 1: 772 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 773 break; 774 case 2: 775 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 776 break; 777 case 4: 778 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 779 break; 780 default: 781 BUG(); 782 break; 783 } 784 785 switch (dstwidth) { 786 case 1: 787 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 788 break; 789 case 2: 790 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 791 break; 792 case 4: 793 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 794 break; 795 default: 796 BUG(); 797 break; 798 } 799 800 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; 801 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 802 return retbits; 803 } 804 805 struct pl08x_lli_build_data { 806 struct pl08x_txd *txd; 807 struct pl08x_bus_data srcbus; 808 struct pl08x_bus_data dstbus; 809 size_t remainder; 810 u32 lli_bus; 811 }; 812 813 /* 814 * Autoselect a master bus to use for the transfer. Slave will be the chosen as 815 * victim in case src & dest are not similarly aligned. i.e. If after aligning 816 * masters address with width requirements of transfer (by sending few byte by 817 * byte data), slave is still not aligned, then its width will be reduced to 818 * BYTE. 819 * - prefers the destination bus if both available 820 * - prefers bus with fixed address (i.e. 
peripheral) 821 */ 822 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 823 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 824 { 825 if (!(cctl & PL080_CONTROL_DST_INCR)) { 826 *mbus = &bd->dstbus; 827 *sbus = &bd->srcbus; 828 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 829 *mbus = &bd->srcbus; 830 *sbus = &bd->dstbus; 831 } else { 832 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 833 *mbus = &bd->dstbus; 834 *sbus = &bd->srcbus; 835 } else { 836 *mbus = &bd->srcbus; 837 *sbus = &bd->dstbus; 838 } 839 } 840 } 841 842 /* 843 * Fills in one LLI for a certain transfer descriptor and advance the counter 844 */ 845 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 846 struct pl08x_lli_build_data *bd, 847 int num_llis, int len, u32 cctl, u32 cctl2) 848 { 849 u32 offset = num_llis * pl08x->lli_words; 850 u32 *llis_va = bd->txd->llis_va + offset; 851 dma_addr_t llis_bus = bd->txd->llis_bus; 852 853 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 854 855 /* Advance the offset to next LLI. */ 856 offset += pl08x->lli_words; 857 858 llis_va[PL080_LLI_SRC] = bd->srcbus.addr; 859 llis_va[PL080_LLI_DST] = bd->dstbus.addr; 860 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); 861 llis_va[PL080_LLI_LLI] |= bd->lli_bus; 862 llis_va[PL080_LLI_CCTL] = cctl; 863 if (pl08x->vd->pl080s) 864 llis_va[PL080S_LLI_CCTL2] = cctl2; 865 866 if (cctl & PL080_CONTROL_SRC_INCR) 867 bd->srcbus.addr += len; 868 if (cctl & PL080_CONTROL_DST_INCR) 869 bd->dstbus.addr += len; 870 871 BUG_ON(bd->remainder < len); 872 873 bd->remainder -= len; 874 } 875 876 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, 877 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, 878 int num_llis, size_t *total_bytes) 879 { 880 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 881 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); 882 (*total_bytes) += len; 883 } 884 885 #ifdef VERBOSE_DEBUG 886 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 887 const u32 *llis_va, int num_llis) 888 { 889 int i; 890 891 if (pl08x->vd->pl080s) { 892 dev_vdbg(&pl08x->adev->dev, 893 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", 894 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); 895 for (i = 0; i < num_llis; i++) { 896 dev_vdbg(&pl08x->adev->dev, 897 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 898 i, llis_va, llis_va[PL080_LLI_SRC], 899 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 900 llis_va[PL080_LLI_CCTL], 901 llis_va[PL080S_LLI_CCTL2]); 902 llis_va += pl08x->lli_words; 903 } 904 } else { 905 dev_vdbg(&pl08x->adev->dev, 906 "%-3s %-9s %-10s %-10s %-10s %s\n", 907 "lli", "", "csrc", "cdst", "clli", "cctl"); 908 for (i = 0; i < num_llis; i++) { 909 dev_vdbg(&pl08x->adev->dev, 910 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 911 i, llis_va, llis_va[PL080_LLI_SRC], 912 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 913 llis_va[PL080_LLI_CCTL]); 914 llis_va += pl08x->lli_words; 915 } 916 } 917 } 918 #else 919 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 920 const u32 *llis_va, int num_llis) {} 921 #endif 922 923 /* 924 * This fills in the table of LLIs for the transfer descriptor 925 * Note that we assume we never have to change the burst sizes 926 * Return 0 for error 927 */ 928 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 929 struct pl08x_txd *txd) 930 { 931 struct pl08x_bus_data *mbus, *sbus; 932 struct pl08x_lli_build_data bd; 933 int num_llis = 0; 934 u32 cctl, early_bytes = 0; 935 size_t 
max_bytes_per_lli, total_bytes; 936 u32 *llis_va, *last_lli; 937 struct pl08x_sg *dsg; 938 939 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 940 if (!txd->llis_va) { 941 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 942 return 0; 943 } 944 945 bd.txd = txd; 946 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 947 cctl = txd->cctl; 948 949 /* Find maximum width of the source bus */ 950 bd.srcbus.maxwidth = 951 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 952 PL080_CONTROL_SWIDTH_SHIFT); 953 954 /* Find maximum width of the destination bus */ 955 bd.dstbus.maxwidth = 956 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 957 PL080_CONTROL_DWIDTH_SHIFT); 958 959 list_for_each_entry(dsg, &txd->dsg_list, node) { 960 total_bytes = 0; 961 cctl = txd->cctl; 962 963 bd.srcbus.addr = dsg->src_addr; 964 bd.dstbus.addr = dsg->dst_addr; 965 bd.remainder = dsg->len; 966 bd.srcbus.buswidth = bd.srcbus.maxwidth; 967 bd.dstbus.buswidth = bd.dstbus.maxwidth; 968 969 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 970 971 dev_vdbg(&pl08x->adev->dev, 972 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", 973 (u64)bd.srcbus.addr, 974 cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 975 bd.srcbus.buswidth, 976 (u64)bd.dstbus.addr, 977 cctl & PL080_CONTROL_DST_INCR ? "+" : "", 978 bd.dstbus.buswidth, 979 bd.remainder); 980 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 981 mbus == &bd.srcbus ? "src" : "dst", 982 sbus == &bd.srcbus ? "src" : "dst"); 983 984 /* 985 * Zero length is only allowed if all these requirements are 986 * met: 987 * - flow controller is peripheral. 988 * - src.addr is aligned to src.width 989 * - dst.addr is aligned to dst.width 990 * 991 * sg_len == 1 should be true, as there can be two cases here: 992 * 993 * - Memory addresses are contiguous and are not scattered. 994 * Here, Only one sg will be passed by user driver, with 995 * memory address and zero length. We pass this to controller 996 * and after the transfer it will receive the last burst 997 * request from peripheral and so transfer finishes. 998 * 999 * - Memory addresses are scattered and are not contiguous. 1000 * Here, Obviously as DMA controller doesn't know when a lli's 1001 * transfer gets over, it can't load next lli. So in this 1002 * case, there has to be an assumption that only one lli is 1003 * supported. Thus, we can't have scattered addresses. 
1004 */ 1005 if (!bd.remainder) { 1006 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 1007 PL080_CONFIG_FLOW_CONTROL_SHIFT; 1008 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 1009 (fc <= PL080_FLOW_SRC2DST_SRC))) { 1010 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 1011 __func__); 1012 return 0; 1013 } 1014 1015 if (!IS_BUS_ALIGNED(&bd.srcbus) || 1016 !IS_BUS_ALIGNED(&bd.dstbus)) { 1017 dev_err(&pl08x->adev->dev, 1018 "%s src & dst address must be aligned to src" 1019 " & dst width if peripheral is flow controller", 1020 __func__); 1021 return 0; 1022 } 1023 1024 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1025 bd.dstbus.buswidth, 0); 1026 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1027 0, cctl, 0); 1028 break; 1029 } 1030 1031 /* 1032 * Send byte by byte for following cases 1033 * - Less than a bus width available 1034 * - until master bus is aligned 1035 */ 1036 if (bd.remainder < mbus->buswidth) 1037 early_bytes = bd.remainder; 1038 else if (!IS_BUS_ALIGNED(mbus)) { 1039 early_bytes = mbus->buswidth - 1040 (mbus->addr & (mbus->buswidth - 1)); 1041 if ((bd.remainder - early_bytes) < mbus->buswidth) 1042 early_bytes = bd.remainder; 1043 } 1044 1045 if (early_bytes) { 1046 dev_vdbg(&pl08x->adev->dev, 1047 "%s byte width LLIs (remain 0x%08zx)\n", 1048 __func__, bd.remainder); 1049 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, 1050 num_llis++, &total_bytes); 1051 } 1052 1053 if (bd.remainder) { 1054 /* 1055 * Master now aligned 1056 * - if slave is not then we must set its width down 1057 */ 1058 if (!IS_BUS_ALIGNED(sbus)) { 1059 dev_dbg(&pl08x->adev->dev, 1060 "%s set down bus width to one byte\n", 1061 __func__); 1062 1063 sbus->buswidth = 1; 1064 } 1065 1066 /* 1067 * Bytes transferred = tsize * src width, not 1068 * MIN(buswidths) 1069 */ 1070 max_bytes_per_lli = bd.srcbus.buswidth * 1071 pl08x->vd->max_transfer_size; 1072 dev_vdbg(&pl08x->adev->dev, 1073 "%s max bytes per lli = %zu\n", 1074 __func__, max_bytes_per_lli); 1075 1076 /* 1077 * Make largest possible LLIs until less than one bus 1078 * width left 1079 */ 1080 while (bd.remainder > (mbus->buswidth - 1)) { 1081 size_t lli_len, tsize, width; 1082 1083 /* 1084 * If enough left try to send max possible, 1085 * otherwise try to send the remainder 1086 */ 1087 lli_len = min(bd.remainder, max_bytes_per_lli); 1088 1089 /* 1090 * Check against maximum bus alignment: 1091 * Calculate actual transfer size in relation to 1092 * bus width an get a maximum remainder of the 1093 * highest bus width - 1 1094 */ 1095 width = max(mbus->buswidth, sbus->buswidth); 1096 lli_len = (lli_len / width) * width; 1097 tsize = lli_len / bd.srcbus.buswidth; 1098 1099 dev_vdbg(&pl08x->adev->dev, 1100 "%s fill lli with single lli chunk of " 1101 "size 0x%08zx (remainder 0x%08zx)\n", 1102 __func__, lli_len, bd.remainder); 1103 1104 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1105 bd.dstbus.buswidth, tsize); 1106 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1107 lli_len, cctl, tsize); 1108 total_bytes += lli_len; 1109 } 1110 1111 /* 1112 * Send any odd bytes 1113 */ 1114 if (bd.remainder) { 1115 dev_vdbg(&pl08x->adev->dev, 1116 "%s align with boundary, send odd bytes (remain %zu)\n", 1117 __func__, bd.remainder); 1118 prep_byte_width_lli(pl08x, &bd, &cctl, 1119 bd.remainder, num_llis++, &total_bytes); 1120 } 1121 } 1122 1123 if (total_bytes != dsg->len) { 1124 dev_err(&pl08x->adev->dev, 1125 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 1126 __func__, total_bytes, dsg->len); 1127 
return 0; 1128 } 1129 1130 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1131 dev_err(&pl08x->adev->dev, 1132 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1133 __func__, MAX_NUM_TSFR_LLIS); 1134 return 0; 1135 } 1136 } 1137 1138 llis_va = txd->llis_va; 1139 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; 1140 1141 if (txd->cyclic) { 1142 /* Link back to the first LLI. */ 1143 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; 1144 } else { 1145 /* The final LLI terminates the LLI. */ 1146 last_lli[PL080_LLI_LLI] = 0; 1147 /* The final LLI element shall also fire an interrupt. */ 1148 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; 1149 } 1150 1151 pl08x_dump_lli(pl08x, llis_va, num_llis); 1152 1153 return num_llis; 1154 } 1155 1156 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1157 struct pl08x_txd *txd) 1158 { 1159 struct pl08x_sg *dsg, *_dsg; 1160 1161 if (txd->llis_va) 1162 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1163 1164 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1165 list_del(&dsg->node); 1166 kfree(dsg); 1167 } 1168 1169 kfree(txd); 1170 } 1171 1172 static void pl08x_desc_free(struct virt_dma_desc *vd) 1173 { 1174 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1175 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1176 1177 dma_descriptor_unmap(&vd->tx); 1178 if (!txd->done) 1179 pl08x_release_mux(plchan); 1180 1181 pl08x_free_txd(plchan->host, txd); 1182 } 1183 1184 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1185 struct pl08x_dma_chan *plchan) 1186 { 1187 LIST_HEAD(head); 1188 1189 vchan_get_all_descriptors(&plchan->vc, &head); 1190 vchan_dma_desc_free_list(&plchan->vc, &head); 1191 } 1192 1193 /* 1194 * The DMA ENGINE API 1195 */ 1196 static void pl08x_free_chan_resources(struct dma_chan *chan) 1197 { 1198 /* Ensure all queued descriptors are freed */ 1199 vchan_free_chan_resources(to_virt_chan(chan)); 1200 } 1201 1202 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1203 struct dma_chan *chan, unsigned long flags) 1204 { 1205 struct dma_async_tx_descriptor *retval = NULL; 1206 1207 return retval; 1208 } 1209 1210 /* 1211 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1212 * If slaves are relying on interrupts to signal completion this function 1213 * must not be called with interrupts disabled. 1214 */ 1215 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1216 dma_cookie_t cookie, struct dma_tx_state *txstate) 1217 { 1218 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1219 struct virt_dma_desc *vd; 1220 unsigned long flags; 1221 enum dma_status ret; 1222 size_t bytes = 0; 1223 1224 ret = dma_cookie_status(chan, cookie, txstate); 1225 if (ret == DMA_COMPLETE) 1226 return ret; 1227 1228 /* 1229 * There's no point calculating the residue if there's 1230 * no txstate to store the value. 
1231 */ 1232 if (!txstate) { 1233 if (plchan->state == PL08X_CHAN_PAUSED) 1234 ret = DMA_PAUSED; 1235 return ret; 1236 } 1237 1238 spin_lock_irqsave(&plchan->vc.lock, flags); 1239 ret = dma_cookie_status(chan, cookie, txstate); 1240 if (ret != DMA_COMPLETE) { 1241 vd = vchan_find_desc(&plchan->vc, cookie); 1242 if (vd) { 1243 /* On the issued list, so hasn't been processed yet */ 1244 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1245 struct pl08x_sg *dsg; 1246 1247 list_for_each_entry(dsg, &txd->dsg_list, node) 1248 bytes += dsg->len; 1249 } else { 1250 bytes = pl08x_getbytes_chan(plchan); 1251 } 1252 } 1253 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1254 1255 /* 1256 * This cookie not complete yet 1257 * Get number of bytes left in the active transactions and queue 1258 */ 1259 dma_set_residue(txstate, bytes); 1260 1261 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) 1262 ret = DMA_PAUSED; 1263 1264 /* Whether waiting or running, we're in progress */ 1265 return ret; 1266 } 1267 1268 /* PrimeCell DMA extension */ 1269 struct burst_table { 1270 u32 burstwords; 1271 u32 reg; 1272 }; 1273 1274 static const struct burst_table burst_sizes[] = { 1275 { 1276 .burstwords = 256, 1277 .reg = PL080_BSIZE_256, 1278 }, 1279 { 1280 .burstwords = 128, 1281 .reg = PL080_BSIZE_128, 1282 }, 1283 { 1284 .burstwords = 64, 1285 .reg = PL080_BSIZE_64, 1286 }, 1287 { 1288 .burstwords = 32, 1289 .reg = PL080_BSIZE_32, 1290 }, 1291 { 1292 .burstwords = 16, 1293 .reg = PL080_BSIZE_16, 1294 }, 1295 { 1296 .burstwords = 8, 1297 .reg = PL080_BSIZE_8, 1298 }, 1299 { 1300 .burstwords = 4, 1301 .reg = PL080_BSIZE_4, 1302 }, 1303 { 1304 .burstwords = 0, 1305 .reg = PL080_BSIZE_1, 1306 }, 1307 }; 1308 1309 /* 1310 * Given the source and destination available bus masks, select which 1311 * will be routed to each port. We try to have source and destination 1312 * on separate ports, but always respect the allowable settings. 
1313 */ 1314 static u32 pl08x_select_bus(u8 src, u8 dst) 1315 { 1316 u32 cctl = 0; 1317 1318 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1319 cctl |= PL080_CONTROL_DST_AHB2; 1320 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1321 cctl |= PL080_CONTROL_SRC_AHB2; 1322 1323 return cctl; 1324 } 1325 1326 static u32 pl08x_cctl(u32 cctl) 1327 { 1328 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1329 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1330 PL080_CONTROL_PROT_MASK); 1331 1332 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1333 return cctl | PL080_CONTROL_PROT_SYS; 1334 } 1335 1336 static u32 pl08x_width(enum dma_slave_buswidth width) 1337 { 1338 switch (width) { 1339 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1340 return PL080_WIDTH_8BIT; 1341 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1342 return PL080_WIDTH_16BIT; 1343 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1344 return PL080_WIDTH_32BIT; 1345 default: 1346 return ~0; 1347 } 1348 } 1349 1350 static u32 pl08x_burst(u32 maxburst) 1351 { 1352 int i; 1353 1354 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1355 if (burst_sizes[i].burstwords <= maxburst) 1356 break; 1357 1358 return burst_sizes[i].reg; 1359 } 1360 1361 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1362 enum dma_slave_buswidth addr_width, u32 maxburst) 1363 { 1364 u32 width, burst, cctl = 0; 1365 1366 width = pl08x_width(addr_width); 1367 if (width == ~0) 1368 return ~0; 1369 1370 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1371 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1372 1373 /* 1374 * If this channel will only request single transfers, set this 1375 * down to ONE element. Also select one element if no maxburst 1376 * is specified. 1377 */ 1378 if (plchan->cd->single) 1379 maxburst = 1; 1380 1381 burst = pl08x_burst(maxburst); 1382 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1383 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1384 1385 return pl08x_cctl(cctl); 1386 } 1387 1388 /* 1389 * Slave transactions callback to the slave device to allow 1390 * synchronization of slave DMA signals with the DMAC enable 1391 */ 1392 static void pl08x_issue_pending(struct dma_chan *chan) 1393 { 1394 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1395 unsigned long flags; 1396 1397 spin_lock_irqsave(&plchan->vc.lock, flags); 1398 if (vchan_issue_pending(&plchan->vc)) { 1399 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1400 pl08x_phy_alloc_and_start(plchan); 1401 } 1402 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1403 } 1404 1405 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1406 { 1407 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1408 1409 if (txd) { 1410 INIT_LIST_HEAD(&txd->dsg_list); 1411 1412 /* Always enable error and terminal interrupts */ 1413 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1414 PL080_CONFIG_TC_IRQ_MASK; 1415 } 1416 return txd; 1417 } 1418 1419 /* 1420 * Initialize a descriptor to be used by memcpy submit 1421 */ 1422 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1423 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1424 size_t len, unsigned long flags) 1425 { 1426 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1427 struct pl08x_driver_data *pl08x = plchan->host; 1428 struct pl08x_txd *txd; 1429 struct pl08x_sg *dsg; 1430 int ret; 1431 1432 txd = pl08x_get_txd(plchan); 1433 if (!txd) { 1434 dev_err(&pl08x->adev->dev, 1435 "%s no memory for descriptor\n", __func__); 1436 return NULL; 1437 } 1438 
1439 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1440 if (!dsg) { 1441 pl08x_free_txd(pl08x, txd); 1442 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", 1443 __func__); 1444 return NULL; 1445 } 1446 list_add_tail(&dsg->node, &txd->dsg_list); 1447 1448 dsg->src_addr = src; 1449 dsg->dst_addr = dest; 1450 dsg->len = len; 1451 1452 /* Set platform data for m2m */ 1453 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1454 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & 1455 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1456 1457 /* Both to be incremented or the code will break */ 1458 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1459 1460 if (pl08x->vd->dualmaster) 1461 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1462 pl08x->mem_buses); 1463 1464 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1465 if (!ret) { 1466 pl08x_free_txd(pl08x, txd); 1467 return NULL; 1468 } 1469 1470 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1471 } 1472 1473 static struct pl08x_txd *pl08x_init_txd( 1474 struct dma_chan *chan, 1475 enum dma_transfer_direction direction, 1476 dma_addr_t *slave_addr) 1477 { 1478 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1479 struct pl08x_driver_data *pl08x = plchan->host; 1480 struct pl08x_txd *txd; 1481 enum dma_slave_buswidth addr_width; 1482 int ret, tmp; 1483 u8 src_buses, dst_buses; 1484 u32 maxburst, cctl; 1485 1486 txd = pl08x_get_txd(plchan); 1487 if (!txd) { 1488 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1489 return NULL; 1490 } 1491 1492 /* 1493 * Set up addresses, the PrimeCell configured address 1494 * will take precedence since this may configure the 1495 * channel target address dynamically at runtime. 1496 */ 1497 if (direction == DMA_MEM_TO_DEV) { 1498 cctl = PL080_CONTROL_SRC_INCR; 1499 *slave_addr = plchan->cfg.dst_addr; 1500 addr_width = plchan->cfg.dst_addr_width; 1501 maxburst = plchan->cfg.dst_maxburst; 1502 src_buses = pl08x->mem_buses; 1503 dst_buses = plchan->cd->periph_buses; 1504 } else if (direction == DMA_DEV_TO_MEM) { 1505 cctl = PL080_CONTROL_DST_INCR; 1506 *slave_addr = plchan->cfg.src_addr; 1507 addr_width = plchan->cfg.src_addr_width; 1508 maxburst = plchan->cfg.src_maxburst; 1509 src_buses = plchan->cd->periph_buses; 1510 dst_buses = pl08x->mem_buses; 1511 } else { 1512 pl08x_free_txd(pl08x, txd); 1513 dev_err(&pl08x->adev->dev, 1514 "%s direction unsupported\n", __func__); 1515 return NULL; 1516 } 1517 1518 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1519 if (cctl == ~0) { 1520 pl08x_free_txd(pl08x, txd); 1521 dev_err(&pl08x->adev->dev, 1522 "DMA slave configuration botched?\n"); 1523 return NULL; 1524 } 1525 1526 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); 1527 1528 if (plchan->cfg.device_fc) 1529 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1530 PL080_FLOW_PER2MEM_PER; 1531 else 1532 tmp = (direction == DMA_MEM_TO_DEV) ? 
PL080_FLOW_MEM2PER : 1533 PL080_FLOW_PER2MEM; 1534 1535 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1536 1537 ret = pl08x_request_mux(plchan); 1538 if (ret < 0) { 1539 pl08x_free_txd(pl08x, txd); 1540 dev_dbg(&pl08x->adev->dev, 1541 "unable to mux for transfer on %s due to platform restrictions\n", 1542 plchan->name); 1543 return NULL; 1544 } 1545 1546 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 1547 plchan->signal, plchan->name); 1548 1549 /* Assign the flow control signal to this channel */ 1550 if (direction == DMA_MEM_TO_DEV) 1551 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 1552 else 1553 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1554 1555 return txd; 1556 } 1557 1558 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 1559 enum dma_transfer_direction direction, 1560 dma_addr_t slave_addr, 1561 dma_addr_t buf_addr, 1562 unsigned int len) 1563 { 1564 struct pl08x_sg *dsg; 1565 1566 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1567 if (!dsg) 1568 return -ENOMEM; 1569 1570 list_add_tail(&dsg->node, &txd->dsg_list); 1571 1572 dsg->len = len; 1573 if (direction == DMA_MEM_TO_DEV) { 1574 dsg->src_addr = buf_addr; 1575 dsg->dst_addr = slave_addr; 1576 } else { 1577 dsg->src_addr = slave_addr; 1578 dsg->dst_addr = buf_addr; 1579 } 1580 1581 return 0; 1582 } 1583 1584 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1585 struct dma_chan *chan, struct scatterlist *sgl, 1586 unsigned int sg_len, enum dma_transfer_direction direction, 1587 unsigned long flags, void *context) 1588 { 1589 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1590 struct pl08x_driver_data *pl08x = plchan->host; 1591 struct pl08x_txd *txd; 1592 struct scatterlist *sg; 1593 int ret, tmp; 1594 dma_addr_t slave_addr; 1595 1596 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1597 __func__, sg_dma_len(sgl), plchan->name); 1598 1599 txd = pl08x_init_txd(chan, direction, &slave_addr); 1600 if (!txd) 1601 return NULL; 1602 1603 for_each_sg(sgl, sg, sg_len, tmp) { 1604 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1605 sg_dma_address(sg), 1606 sg_dma_len(sg)); 1607 if (ret) { 1608 pl08x_release_mux(plchan); 1609 pl08x_free_txd(pl08x, txd); 1610 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1611 __func__); 1612 return NULL; 1613 } 1614 } 1615 1616 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1617 if (!ret) { 1618 pl08x_release_mux(plchan); 1619 pl08x_free_txd(pl08x, txd); 1620 return NULL; 1621 } 1622 1623 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1624 } 1625 1626 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 1627 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1628 size_t period_len, enum dma_transfer_direction direction, 1629 unsigned long flags) 1630 { 1631 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1632 struct pl08x_driver_data *pl08x = plchan->host; 1633 struct pl08x_txd *txd; 1634 int ret, tmp; 1635 dma_addr_t slave_addr; 1636 1637 dev_dbg(&pl08x->adev->dev, 1638 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", 1639 __func__, period_len, buf_len, 1640 direction == DMA_MEM_TO_DEV ? 
"to" : "from", 1641 plchan->name); 1642 1643 txd = pl08x_init_txd(chan, direction, &slave_addr); 1644 if (!txd) 1645 return NULL; 1646 1647 txd->cyclic = true; 1648 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 1649 for (tmp = 0; tmp < buf_len; tmp += period_len) { 1650 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1651 buf_addr + tmp, period_len); 1652 if (ret) { 1653 pl08x_release_mux(plchan); 1654 pl08x_free_txd(pl08x, txd); 1655 return NULL; 1656 } 1657 } 1658 1659 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1660 if (!ret) { 1661 pl08x_release_mux(plchan); 1662 pl08x_free_txd(pl08x, txd); 1663 return NULL; 1664 } 1665 1666 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1667 } 1668 1669 static int pl08x_config(struct dma_chan *chan, 1670 struct dma_slave_config *config) 1671 { 1672 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1673 struct pl08x_driver_data *pl08x = plchan->host; 1674 1675 if (!plchan->slave) 1676 return -EINVAL; 1677 1678 /* Reject definitely invalid configurations */ 1679 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 1680 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1681 return -EINVAL; 1682 1683 if (config->device_fc && pl08x->vd->pl080s) { 1684 dev_err(&pl08x->adev->dev, 1685 "%s: PL080S does not support peripheral flow control\n", 1686 __func__); 1687 return -EINVAL; 1688 } 1689 1690 plchan->cfg = *config; 1691 1692 return 0; 1693 } 1694 1695 static int pl08x_terminate_all(struct dma_chan *chan) 1696 { 1697 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1698 struct pl08x_driver_data *pl08x = plchan->host; 1699 unsigned long flags; 1700 1701 spin_lock_irqsave(&plchan->vc.lock, flags); 1702 if (!plchan->phychan && !plchan->at) { 1703 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1704 return 0; 1705 } 1706 1707 plchan->state = PL08X_CHAN_IDLE; 1708 1709 if (plchan->phychan) { 1710 /* 1711 * Mark physical channel as free and free any slave 1712 * signal 1713 */ 1714 pl08x_phy_free(plchan); 1715 } 1716 /* Dequeue jobs and free LLIs */ 1717 if (plchan->at) { 1718 pl08x_desc_free(&plchan->at->vd); 1719 plchan->at = NULL; 1720 } 1721 /* Dequeue jobs not yet fired as well */ 1722 pl08x_free_txd_list(pl08x, plchan); 1723 1724 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1725 1726 return 0; 1727 } 1728 1729 static int pl08x_pause(struct dma_chan *chan) 1730 { 1731 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1732 unsigned long flags; 1733 1734 /* 1735 * Anything succeeds on channels with no physical allocation and 1736 * no queued transfers. 1737 */ 1738 spin_lock_irqsave(&plchan->vc.lock, flags); 1739 if (!plchan->phychan && !plchan->at) { 1740 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1741 return 0; 1742 } 1743 1744 pl08x_pause_phy_chan(plchan->phychan); 1745 plchan->state = PL08X_CHAN_PAUSED; 1746 1747 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1748 1749 return 0; 1750 } 1751 1752 static int pl08x_resume(struct dma_chan *chan) 1753 { 1754 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1755 unsigned long flags; 1756 1757 /* 1758 * Anything succeeds on channels with no physical allocation and 1759 * no queued transfers. 
1760 */ 1761 spin_lock_irqsave(&plchan->vc.lock, flags); 1762 if (!plchan->phychan && !plchan->at) { 1763 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1764 return 0; 1765 } 1766 1767 pl08x_resume_phy_chan(plchan->phychan); 1768 plchan->state = PL08X_CHAN_RUNNING; 1769 1770 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1771 1772 return 0; 1773 } 1774 1775 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1776 { 1777 struct pl08x_dma_chan *plchan; 1778 char *name = chan_id; 1779 1780 /* Reject channels for devices not bound to this driver */ 1781 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1782 return false; 1783 1784 plchan = to_pl08x_chan(chan); 1785 1786 /* Check that the channel is not taken! */ 1787 if (!strcmp(plchan->name, name)) 1788 return true; 1789 1790 return false; 1791 } 1792 EXPORT_SYMBOL_GPL(pl08x_filter_id); 1793 1794 /* 1795 * Just check that the device is there and active 1796 * TODO: turn this bit on/off depending on the number of physical channels 1797 * actually used, if it is zero... well shut it off. That will save some 1798 * power. Cut the clock at the same time. 1799 */ 1800 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1801 { 1802 /* The Nomadik variant does not have the config register */ 1803 if (pl08x->vd->nomadik) 1804 return; 1805 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1806 } 1807 1808 static irqreturn_t pl08x_irq(int irq, void *dev) 1809 { 1810 struct pl08x_driver_data *pl08x = dev; 1811 u32 mask = 0, err, tc, i; 1812 1813 /* check & clear - ERR & TC interrupts */ 1814 err = readl(pl08x->base + PL080_ERR_STATUS); 1815 if (err) { 1816 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1817 __func__, err); 1818 writel(err, pl08x->base + PL080_ERR_CLEAR); 1819 } 1820 tc = readl(pl08x->base + PL080_TC_STATUS); 1821 if (tc) 1822 writel(tc, pl08x->base + PL080_TC_CLEAR); 1823 1824 if (!err && !tc) 1825 return IRQ_NONE; 1826 1827 for (i = 0; i < pl08x->vd->channels; i++) { 1828 if (((1 << i) & err) || ((1 << i) & tc)) { 1829 /* Locate physical channel */ 1830 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1831 struct pl08x_dma_chan *plchan = phychan->serving; 1832 struct pl08x_txd *tx; 1833 1834 if (!plchan) { 1835 dev_err(&pl08x->adev->dev, 1836 "%s Error TC interrupt on unused channel: 0x%08x\n", 1837 __func__, i); 1838 continue; 1839 } 1840 1841 spin_lock(&plchan->vc.lock); 1842 tx = plchan->at; 1843 if (tx && tx->cyclic) { 1844 vchan_cyclic_callback(&tx->vd); 1845 } else if (tx) { 1846 plchan->at = NULL; 1847 /* 1848 * This descriptor is done, release its mux 1849 * reservation. 1850 */ 1851 pl08x_release_mux(plchan); 1852 tx->done = true; 1853 vchan_cookie_complete(&tx->vd); 1854 1855 /* 1856 * And start the next descriptor (if any), 1857 * otherwise free this channel. 1858 */ 1859 if (vchan_next_desc(&plchan->vc)) 1860 pl08x_start_next_txd(plchan); 1861 else 1862 pl08x_phy_free(plchan); 1863 } 1864 spin_unlock(&plchan->vc.lock); 1865 1866 mask |= (1 << i); 1867 } 1868 } 1869 1870 return mask ? IRQ_HANDLED : IRQ_NONE; 1871 } 1872 1873 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1874 { 1875 chan->slave = true; 1876 chan->name = chan->cd->bus_id; 1877 chan->cfg.src_addr = chan->cd->addr; 1878 chan->cfg.dst_addr = chan->cd->addr; 1879 } 1880 1881 /* 1882 * Initialise the DMAC memcpy/slave channels. 
1883 * Make a local wrapper to hold required data 1884 */ 1885 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1886 struct dma_device *dmadev, unsigned int channels, bool slave) 1887 { 1888 struct pl08x_dma_chan *chan; 1889 int i; 1890 1891 INIT_LIST_HEAD(&dmadev->channels); 1892 1893 /* 1894 * Register as many many memcpy as we have physical channels, 1895 * we won't always be able to use all but the code will have 1896 * to cope with that situation. 1897 */ 1898 for (i = 0; i < channels; i++) { 1899 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1900 if (!chan) { 1901 dev_err(&pl08x->adev->dev, 1902 "%s no memory for channel\n", __func__); 1903 return -ENOMEM; 1904 } 1905 1906 chan->host = pl08x; 1907 chan->state = PL08X_CHAN_IDLE; 1908 chan->signal = -1; 1909 1910 if (slave) { 1911 chan->cd = &pl08x->pd->slave_channels[i]; 1912 pl08x_dma_slave_init(chan); 1913 } else { 1914 chan->cd = &pl08x->pd->memcpy_channel; 1915 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1916 if (!chan->name) { 1917 kfree(chan); 1918 return -ENOMEM; 1919 } 1920 } 1921 dev_dbg(&pl08x->adev->dev, 1922 "initialize virtual channel \"%s\"\n", 1923 chan->name); 1924 1925 chan->vc.desc_free = pl08x_desc_free; 1926 vchan_init(&chan->vc, dmadev); 1927 } 1928 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1929 i, slave ? "slave" : "memcpy"); 1930 return i; 1931 } 1932 1933 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1934 { 1935 struct pl08x_dma_chan *chan = NULL; 1936 struct pl08x_dma_chan *next; 1937 1938 list_for_each_entry_safe(chan, 1939 next, &dmadev->channels, vc.chan.device_node) { 1940 list_del(&chan->vc.chan.device_node); 1941 kfree(chan); 1942 } 1943 } 1944 1945 #ifdef CONFIG_DEBUG_FS 1946 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1947 { 1948 switch (state) { 1949 case PL08X_CHAN_IDLE: 1950 return "idle"; 1951 case PL08X_CHAN_RUNNING: 1952 return "running"; 1953 case PL08X_CHAN_PAUSED: 1954 return "paused"; 1955 case PL08X_CHAN_WAITING: 1956 return "waiting"; 1957 default: 1958 break; 1959 } 1960 return "UNKNOWN STATE"; 1961 } 1962 1963 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1964 { 1965 struct pl08x_driver_data *pl08x = s->private; 1966 struct pl08x_dma_chan *chan; 1967 struct pl08x_phy_chan *ch; 1968 unsigned long flags; 1969 int i; 1970 1971 seq_printf(s, "PL08x physical channels:\n"); 1972 seq_printf(s, "CHANNEL:\tUSER:\n"); 1973 seq_printf(s, "--------\t-----\n"); 1974 for (i = 0; i < pl08x->vd->channels; i++) { 1975 struct pl08x_dma_chan *virt_chan; 1976 1977 ch = &pl08x->phy_chans[i]; 1978 1979 spin_lock_irqsave(&ch->lock, flags); 1980 virt_chan = ch->serving; 1981 1982 seq_printf(s, "%d\t\t%s%s\n", 1983 ch->id, 1984 virt_chan ? virt_chan->name : "(none)", 1985 ch->locked ? 
" LOCKED" : ""); 1986 1987 spin_unlock_irqrestore(&ch->lock, flags); 1988 } 1989 1990 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1991 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1992 seq_printf(s, "--------\t------\n"); 1993 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 1994 seq_printf(s, "%s\t\t%s\n", chan->name, 1995 pl08x_state_str(chan->state)); 1996 } 1997 1998 seq_printf(s, "\nPL08x virtual slave channels:\n"); 1999 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2000 seq_printf(s, "--------\t------\n"); 2001 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2002 seq_printf(s, "%s\t\t%s\n", chan->name, 2003 pl08x_state_str(chan->state)); 2004 } 2005 2006 return 0; 2007 } 2008 2009 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 2010 { 2011 return single_open(file, pl08x_debugfs_show, inode->i_private); 2012 } 2013 2014 static const struct file_operations pl08x_debugfs_operations = { 2015 .open = pl08x_debugfs_open, 2016 .read = seq_read, 2017 .llseek = seq_lseek, 2018 .release = single_release, 2019 }; 2020 2021 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2022 { 2023 /* Expose a simple debugfs interface to view all clocks */ 2024 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2025 S_IFREG | S_IRUGO, NULL, pl08x, 2026 &pl08x_debugfs_operations); 2027 } 2028 2029 #else 2030 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2031 { 2032 } 2033 #endif 2034 2035 #ifdef CONFIG_OF 2036 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, 2037 u32 id) 2038 { 2039 struct pl08x_dma_chan *chan; 2040 2041 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2042 if (chan->signal == id) 2043 return &chan->vc.chan; 2044 } 2045 2046 return NULL; 2047 } 2048 2049 static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, 2050 struct of_dma *ofdma) 2051 { 2052 struct pl08x_driver_data *pl08x = ofdma->of_dma_data; 2053 struct pl08x_channel_data *data; 2054 struct pl08x_dma_chan *chan; 2055 struct dma_chan *dma_chan; 2056 2057 if (!pl08x) 2058 return NULL; 2059 2060 if (dma_spec->args_count != 2) 2061 return NULL; 2062 2063 dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); 2064 if (dma_chan) 2065 return dma_get_slave_channel(dma_chan); 2066 2067 chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data), 2068 GFP_KERNEL); 2069 if (!chan) 2070 return NULL; 2071 2072 data = (void *)&chan[1]; 2073 data->bus_id = "(none)"; 2074 data->periph_buses = dma_spec->args[1]; 2075 2076 chan->cd = data; 2077 chan->host = pl08x; 2078 chan->slave = true; 2079 chan->name = data->bus_id; 2080 chan->state = PL08X_CHAN_IDLE; 2081 chan->signal = dma_spec->args[0]; 2082 chan->vc.desc_free = pl08x_desc_free; 2083 2084 vchan_init(&chan->vc, &pl08x->slave); 2085 2086 return dma_get_slave_channel(&chan->vc.chan); 2087 } 2088 2089 static int pl08x_of_probe(struct amba_device *adev, 2090 struct pl08x_driver_data *pl08x, 2091 struct device_node *np) 2092 { 2093 struct pl08x_platform_data *pd; 2094 u32 cctl_memcpy = 0; 2095 u32 val; 2096 int ret; 2097 2098 pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); 2099 if (!pd) 2100 return -ENOMEM; 2101 2102 /* Eligible bus masters for fetching LLIs */ 2103 if (of_property_read_bool(np, "lli-bus-interface-ahb1")) 2104 pd->lli_buses |= PL08X_AHB1; 2105 if (of_property_read_bool(np, "lli-bus-interface-ahb2")) 2106 pd->lli_buses |= PL08X_AHB2; 2107 if (!pd->lli_buses) { 2108 dev_info(&adev->dev, "no bus 
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	u32 cctl_memcpy = 0;
	u32 val;
	int ret;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		/* Fall through */
	case 1:
		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 4:
		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 8:
		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 64:
		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 128:
		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 256:
		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		/* Fall through */
	case 8:
		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* This is currently the only thing making sense */
	cctl_memcpy |= PL080_CONTROL_PROT_SYS;

	/* Set up memcpy channel */
	pd->memcpy_channel.bus_id = "memcpy";
	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
	/* Use the buses that can access memory, obviously */
	pd->memcpy_channel.periph_buses = pd->mem_buses;

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif
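/*
 * Sketch of the optional controller-node properties parsed by
 * pl08x_of_probe() above.  The unit address and the chosen values are
 * illustrative only; anything omitted falls back to the defaults in the
 * code (all bus masters eligible, 1-byte bursts, 8-bit bus width):
 *
 *	dmac: dma-controller@80110000 {
 *		...
 *		lli-bus-interface-ahb1;
 *		mem-bus-interface-ahb1;
 *		mem-bus-interface-ahb2;
 *		memcpy-burst-size = <16>;
 *		memcpy-bus-width = <32>;
 *		#dma-cells = <2>;
 *	};
 */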
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
	pl08x->slave.device_config = pl08x_config;
	pl08x->slave.device_pause = pl08x_pause;
	pl08x->slave.device_resume = pl08x_resume;
	pl08x->slave.device_terminate_all = pl08x_terminate_all;
	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/*
	 * By default, AHB1 only. If the IP is dual master, take the bus
	 * selection from the platform data.
	 */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts before attaching the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		ch->reg_config = ch->base + vd->config_offset;
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
"BUSY" : "FREE"); 2369 } 2370 2371 /* Register as many memcpy channels as there are physical channels */ 2372 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2373 pl08x->vd->channels, false); 2374 if (ret <= 0) { 2375 dev_warn(&pl08x->adev->dev, 2376 "%s failed to enumerate memcpy channels - %d\n", 2377 __func__, ret); 2378 goto out_no_memcpy; 2379 } 2380 2381 /* Register slave channels */ 2382 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2383 pl08x->pd->num_slave_channels, true); 2384 if (ret < 0) { 2385 dev_warn(&pl08x->adev->dev, 2386 "%s failed to enumerate slave channels - %d\n", 2387 __func__, ret); 2388 goto out_no_slave; 2389 } 2390 2391 ret = dma_async_device_register(&pl08x->memcpy); 2392 if (ret) { 2393 dev_warn(&pl08x->adev->dev, 2394 "%s failed to register memcpy as an async device - %d\n", 2395 __func__, ret); 2396 goto out_no_memcpy_reg; 2397 } 2398 2399 ret = dma_async_device_register(&pl08x->slave); 2400 if (ret) { 2401 dev_warn(&pl08x->adev->dev, 2402 "%s failed to register slave as an async device - %d\n", 2403 __func__, ret); 2404 goto out_no_slave_reg; 2405 } 2406 2407 amba_set_drvdata(adev, pl08x); 2408 init_pl08x_debugfs(pl08x); 2409 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2410 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2411 (unsigned long long)adev->res.start, adev->irq[0]); 2412 2413 return 0; 2414 2415 out_no_slave_reg: 2416 dma_async_device_unregister(&pl08x->memcpy); 2417 out_no_memcpy_reg: 2418 pl08x_free_virtual_channels(&pl08x->slave); 2419 out_no_slave: 2420 pl08x_free_virtual_channels(&pl08x->memcpy); 2421 out_no_memcpy: 2422 kfree(pl08x->phy_chans); 2423 out_no_phychans: 2424 free_irq(adev->irq[0], pl08x); 2425 out_no_irq: 2426 iounmap(pl08x->base); 2427 out_no_ioremap: 2428 dma_pool_destroy(pl08x->pool); 2429 out_no_lli_pool: 2430 out_no_platdata: 2431 kfree(pl08x); 2432 out_no_pl08x: 2433 amba_release_regions(adev); 2434 return ret; 2435 } 2436 2437 /* PL080 has 8 channels and the PL080 have just 2 */ 2438 static struct vendor_data vendor_pl080 = { 2439 .config_offset = PL080_CH_CONFIG, 2440 .channels = 8, 2441 .dualmaster = true, 2442 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2443 }; 2444 2445 static struct vendor_data vendor_nomadik = { 2446 .config_offset = PL080_CH_CONFIG, 2447 .channels = 8, 2448 .dualmaster = true, 2449 .nomadik = true, 2450 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2451 }; 2452 2453 static struct vendor_data vendor_pl080s = { 2454 .config_offset = PL080S_CH_CONFIG, 2455 .channels = 8, 2456 .pl080s = true, 2457 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2458 }; 2459 2460 static struct vendor_data vendor_pl081 = { 2461 .config_offset = PL080_CH_CONFIG, 2462 .channels = 2, 2463 .dualmaster = false, 2464 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2465 }; 2466 2467 static struct amba_id pl08x_ids[] = { 2468 /* Samsung PL080S variant */ 2469 { 2470 .id = 0x0a141080, 2471 .mask = 0xffffffff, 2472 .data = &vendor_pl080s, 2473 }, 2474 /* PL080 */ 2475 { 2476 .id = 0x00041080, 2477 .mask = 0x000fffff, 2478 .data = &vendor_pl080, 2479 }, 2480 /* PL081 */ 2481 { 2482 .id = 0x00041081, 2483 .mask = 0x000fffff, 2484 .data = &vendor_pl081, 2485 }, 2486 /* Nomadik 8815 PL080 variant */ 2487 { 2488 .id = 0x00280080, 2489 .mask = 0x00ffffff, 2490 .data = &vendor_nomadik, 2491 }, 2492 { 0, 0 }, 2493 }; 2494 2495 MODULE_DEVICE_TABLE(amba, pl08x_ids); 2496 2497 static struct amba_driver pl08x_amba_driver = { 2498 .drv.name 
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);
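/*
 * For reference, a minimal slave-transfer sequence as seen from a peripheral
 * driver sitting on top of this DMAC, using only the generic dmaengine API
 * exposed above (device_config, device_prep_slave_sg, device_issue_pending).
 * This is a sketch: "chan" is a channel obtained earlier (e.g. via
 * dma_request_channel() or the OF translation), and fifo_dma_addr, buf_dma
 * and len are whatever the client has mapped:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */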