/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in the following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented), transferring
 *    data if either is active.  The LBREQ and LSREQ signals are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.  Unsupported by PL080S.
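 *
 * For example (illustrative): a peripheral with a 16-word FIFO would
 * typically use a source burst size of 8 and a destination burst size
 * of 16 for memory-to-peripheral transfers.
 *
 * A minimal sketch of how a client might claim one of this driver's
 * slave channels through the generic dmaengine API (the channel name
 * "uart0_tx" is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");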
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

#define PL80X_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @config_offset: the offset to the channel configuration register
 * @channels: the number of channels available in this variant
 * @signals: the number of request signals available from the hardware
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers
 *	are missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 *	LLI word for transfer size.
 * @max_transfer_size: the maximum single element transfer size for this
 *	PL08x variant.
 */
struct vendor_data {
	u8 config_offset;
	u8 channels;
	u8 signals;
	bool dualmaster;
	bool nomadik;
	bool pl080s;
	u32 max_transfer_size;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @reg_config: configuration register address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	void __iomem *reg_config;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *	mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	u32 *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
	bool cyclic;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, including the runtime RX/TX addresses
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lli_words: how many words are used in each LLI item for this variant
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	u8 lli_buses;
	u8 mem_buses;
	u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC		0
#define PL080_LLI_DST		1
#define PL080_LLI_LLI		2
#define PL080_LLI_CCTL		3
#define PL080S_LLI_CCTL2	4

/* Total words in an LLI. */
#define PL080_LLI_WORDS		4
#define PL080S_LLI_WORDS	8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS	512
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
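 *
 * (Illustrative: on a platform that muxes more peripheral request lines
 * onto the 16 PL08x inputs, the get_xfer_signal() platform hook would
 * program the external mux and return the request input actually used,
 * and put_xfer_signal() would release it when the last user is done.)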
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
		ret = pd->get_xfer_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
			pd->put_xfer_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->reg_config);
	return val & PL080_CONFIG_ACTIVE;
}

static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
		struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
	if (pl08x->vd->pl080s)
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
			lli[PL080S_LLI_CCTL2], ccfg);
	else
		dev_vdbg(&pl08x->adev->dev,
			"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
			"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
			phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
			lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

	writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
	writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
	writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
	writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);

	if (pl08x->vd->pl080s)
		writel_relaxed(lli[PL080S_LLI_CCTL2],
				phychan->base + PL080S_CH_CONTROL2);

	writel(ccfg, phychan->reg_config);
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->reg_config);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->reg_config);

	writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->reg_config);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->reg_config);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->reg_config);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->reg_config);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->reg_config);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->reg_config);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	cctl &= PL080_CONTROL_SWIDTH_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;

	cctl &= PL080_CONTROL_SWIDTH_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	const u32 *llis_va, *llis_va_limit;
	struct pl08x_phy_chan *ch;
	dma_addr_t llis_bus;
	struct pl08x_txd *txd;
	u32 llis_max_words;
	size_t bytes;
	u32 clli;

	ch = plchan->phychan;
	txd = plchan->at;

	if (!ch || !txd)
		return 0;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

	/* First get the remaining bytes in the active transfer */
	if (pl08x->vd->pl080s)
		bytes = get_bytes_in_cctl_pl080s(
				readl(ch->base + PL080_CH_CONTROL),
				readl(ch->base + PL080S_CH_CONTROL2));
	else
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

	if (!clli)
		return bytes;

	llis_va = txd->llis_va;
	llis_bus = txd->llis_bus;

	llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
	BUG_ON(clli < llis_bus || clli >= llis_bus +
						sizeof(u32) * llis_max_words);

	/*
	 * Locate the next LLI - as this is an array,
	 * it's simple maths to find.
	 */
	llis_va += (clli - llis_bus) / sizeof(u32);

	llis_va_limit = llis_va + llis_max_words;

	for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
		if (pl08x->vd->pl080s)
			bytes += get_bytes_in_cctl_pl080s(
						llis_va[PL080_LLI_CCTL],
						llis_va[PL080S_LLI_CCTL2]);
		else
			bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);

		/*
		 * A LLI pointer going backward terminates the LLI list
		 */
		if (llis_va[PL080_LLI_LLI] <= clli)
			break;
	}

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
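 *
 * (If no free physical channel is found, the virtual channel is simply
 * left in the WAITING state and will be restarted from pl08x_phy_free()
 * when a physical channel is released.)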
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels,
				    vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
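		 * (The waiting channel found above is necessarily a
		 * different virtual channel from the one being freed, so
		 * the two vc.lock instances are distinct and nesting them
		 * is safe.)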
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus will be
 * chosen as the victim in case src & dest are not similarly aligned, i.e.
 * if, after aligning the master's address to the width requirements of
 * the transfer (by sending a few bytes byte by byte), the slave is still
 * not aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus,
	u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_lli_build_data *bd,
				    int num_llis, int len, u32 cctl, u32 cctl2)
{
	u32 offset = num_llis * pl08x->lli_words;
	u32 *llis_va = bd->txd->llis_va + offset;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	/* Advance the offset to next LLI. */
	offset += pl08x->lli_words;

	llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
	llis_va[PL080_LLI_DST] = bd->dstbus.addr;
	llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
	llis_va[PL080_LLI_LLI] |= bd->lli_bus;
	llis_va[PL080_LLI_CCTL] = cctl;
	if (pl08x->vd->pl080s)
		llis_va[PL080S_LLI_CCTL2] = cctl2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
			struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
			int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
	(*total_bytes) += len;
}

#ifdef VERBOSE_DEBUG
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
			   const u32 *llis_va, int num_llis)
{
	int i;

	if (pl08x->vd->pl080s) {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL],
				llis_va[PL080S_LLI_CCTL2]);
			llis_va += pl08x->lli_words;
		}
	} else {
		dev_vdbg(&pl08x->adev->dev,
			"%-3s %-9s %-10s %-10s %-10s %s\n",
			"lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				"%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				i, llis_va, llis_va[PL080_LLI_SRC],
				llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
				llis_va[PL080_LLI_CCTL]);
			llis_va += pl08x->lli_words;
		}
	}
}
#else
static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
				  const u32 *llis_va, int num_llis) {}
#endif

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	u32 *llis_va, *last_lli;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev,
			"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
			(u64)bd.srcbus.addr,
			cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			(u64)bd.dstbus.addr,
			cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   the memory address and zero length.  We pass this to the
		 *   controller and after the transfer it will receive the last
		 *   burst request from the peripheral and so the transfer
		 *   finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, as the DMA controller doesn't know when an LLI's
		 *   transfer gets over, it can't load the next LLI.  So in
		 *   this case, there has to be an assumption that only one
		 *   LLI is supported.  Thus, we can't have scattered addresses.
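		 *
		 * (In short: a zero-length sg only makes sense as the sole
		 * entry of the list, with the peripheral acting as the flow
		 * controller and signalling the final transfer itself.)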
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if (!IS_BUS_ALIGNED(&bd.srcbus) ||
				!IS_BUS_ALIGNED(&bd.dstbus)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
					0, cctl, 0);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if (!IS_BUS_ALIGNED(mbus)) {
			early_bytes = mbus->buswidth -
				(mbus->addr & (mbus->buswidth - 1));
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08zx)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
				num_llis++, &total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (!IS_BUS_ALIGNED(sbus)) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
						pl08x->vd->max_transfer_size;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate the actual transfer size in
				 * relation to bus width and get a maximum
				 * remainder of the highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
						lli_len, cctl, tsize);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(pl08x, &bd, &cctl,
					bd.remainder, num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;

	if (txd->cyclic) {
		/* Link back to the first LLI. */
		last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
	} else {
		/* The final LLI terminates the LLI chain. */
		last_lli[PL080_LLI_LLI] = 0;
		/* The final LLI element shall also fire an interrupt. */
		last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
	}

	pl08x_dump_lli(pl08x, llis_va, num_llis);

	return num_llis;
}

static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

	dma_descriptor_unmap(&vd->tx);
	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&plchan->vc, &head);
	vchan_dma_desc_free_list(&plchan->vc, &head);
}

/*
 * The DMA ENGINE API
 */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return ret;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
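 *
 * For example (illustrative): with src = dst = PL08X_AHB1 | PL08X_AHB2,
 * the code below routes the destination to AHB2 and the source to AHB1,
 * so the two transfers use separate ports.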
1317 */ 1318 static u32 pl08x_select_bus(u8 src, u8 dst) 1319 { 1320 u32 cctl = 0; 1321 1322 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1323 cctl |= PL080_CONTROL_DST_AHB2; 1324 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1325 cctl |= PL080_CONTROL_SRC_AHB2; 1326 1327 return cctl; 1328 } 1329 1330 static u32 pl08x_cctl(u32 cctl) 1331 { 1332 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1333 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1334 PL080_CONTROL_PROT_MASK); 1335 1336 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1337 return cctl | PL080_CONTROL_PROT_SYS; 1338 } 1339 1340 static u32 pl08x_width(enum dma_slave_buswidth width) 1341 { 1342 switch (width) { 1343 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1344 return PL080_WIDTH_8BIT; 1345 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1346 return PL080_WIDTH_16BIT; 1347 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1348 return PL080_WIDTH_32BIT; 1349 default: 1350 return ~0; 1351 } 1352 } 1353 1354 static u32 pl08x_burst(u32 maxburst) 1355 { 1356 int i; 1357 1358 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1359 if (burst_sizes[i].burstwords <= maxburst) 1360 break; 1361 1362 return burst_sizes[i].reg; 1363 } 1364 1365 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1366 enum dma_slave_buswidth addr_width, u32 maxburst) 1367 { 1368 u32 width, burst, cctl = 0; 1369 1370 width = pl08x_width(addr_width); 1371 if (width == ~0) 1372 return ~0; 1373 1374 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1375 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1376 1377 /* 1378 * If this channel will only request single transfers, set this 1379 * down to ONE element. Also select one element if no maxburst 1380 * is specified. 1381 */ 1382 if (plchan->cd->single) 1383 maxburst = 1; 1384 1385 burst = pl08x_burst(maxburst); 1386 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1387 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1388 1389 return pl08x_cctl(cctl); 1390 } 1391 1392 /* 1393 * Slave transactions callback to the slave device to allow 1394 * synchronization of slave DMA signals with the DMAC enable 1395 */ 1396 static void pl08x_issue_pending(struct dma_chan *chan) 1397 { 1398 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1399 unsigned long flags; 1400 1401 spin_lock_irqsave(&plchan->vc.lock, flags); 1402 if (vchan_issue_pending(&plchan->vc)) { 1403 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1404 pl08x_phy_alloc_and_start(plchan); 1405 } 1406 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1407 } 1408 1409 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1410 { 1411 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1412 1413 if (txd) { 1414 INIT_LIST_HEAD(&txd->dsg_list); 1415 1416 /* Always enable error and terminal interrupts */ 1417 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1418 PL080_CONFIG_TC_IRQ_MASK; 1419 } 1420 return txd; 1421 } 1422 1423 /* 1424 * Initialize a descriptor to be used by memcpy submit 1425 */ 1426 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1427 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1428 size_t len, unsigned long flags) 1429 { 1430 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1431 struct pl08x_driver_data *pl08x = plchan->host; 1432 struct pl08x_txd *txd; 1433 struct pl08x_sg *dsg; 1434 int ret; 1435 1436 txd = pl08x_get_txd(plchan); 1437 if (!txd) { 1438 dev_err(&pl08x->adev->dev, 1439 "%s no memory for descriptor\n", __func__); 1440 return NULL; 1441 } 1442 
1443 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1444 if (!dsg) { 1445 pl08x_free_txd(pl08x, txd); 1446 return NULL; 1447 } 1448 list_add_tail(&dsg->node, &txd->dsg_list); 1449 1450 dsg->src_addr = src; 1451 dsg->dst_addr = dest; 1452 dsg->len = len; 1453 1454 /* Set platform data for m2m */ 1455 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1456 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & 1457 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1458 1459 /* Both to be incremented or the code will break */ 1460 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1461 1462 if (pl08x->vd->dualmaster) 1463 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1464 pl08x->mem_buses); 1465 1466 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1467 if (!ret) { 1468 pl08x_free_txd(pl08x, txd); 1469 return NULL; 1470 } 1471 1472 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1473 } 1474 1475 static struct pl08x_txd *pl08x_init_txd( 1476 struct dma_chan *chan, 1477 enum dma_transfer_direction direction, 1478 dma_addr_t *slave_addr) 1479 { 1480 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1481 struct pl08x_driver_data *pl08x = plchan->host; 1482 struct pl08x_txd *txd; 1483 enum dma_slave_buswidth addr_width; 1484 int ret, tmp; 1485 u8 src_buses, dst_buses; 1486 u32 maxburst, cctl; 1487 1488 txd = pl08x_get_txd(plchan); 1489 if (!txd) { 1490 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1491 return NULL; 1492 } 1493 1494 /* 1495 * Set up addresses, the PrimeCell configured address 1496 * will take precedence since this may configure the 1497 * channel target address dynamically at runtime. 1498 */ 1499 if (direction == DMA_MEM_TO_DEV) { 1500 cctl = PL080_CONTROL_SRC_INCR; 1501 *slave_addr = plchan->cfg.dst_addr; 1502 addr_width = plchan->cfg.dst_addr_width; 1503 maxburst = plchan->cfg.dst_maxburst; 1504 src_buses = pl08x->mem_buses; 1505 dst_buses = plchan->cd->periph_buses; 1506 } else if (direction == DMA_DEV_TO_MEM) { 1507 cctl = PL080_CONTROL_DST_INCR; 1508 *slave_addr = plchan->cfg.src_addr; 1509 addr_width = plchan->cfg.src_addr_width; 1510 maxburst = plchan->cfg.src_maxburst; 1511 src_buses = plchan->cd->periph_buses; 1512 dst_buses = pl08x->mem_buses; 1513 } else { 1514 pl08x_free_txd(pl08x, txd); 1515 dev_err(&pl08x->adev->dev, 1516 "%s direction unsupported\n", __func__); 1517 return NULL; 1518 } 1519 1520 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1521 if (cctl == ~0) { 1522 pl08x_free_txd(pl08x, txd); 1523 dev_err(&pl08x->adev->dev, 1524 "DMA slave configuration botched?\n"); 1525 return NULL; 1526 } 1527 1528 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); 1529 1530 if (plchan->cfg.device_fc) 1531 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1532 PL080_FLOW_PER2MEM_PER; 1533 else 1534 tmp = (direction == DMA_MEM_TO_DEV) ? 
PL080_FLOW_MEM2PER : 1535 PL080_FLOW_PER2MEM; 1536 1537 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1538 1539 ret = pl08x_request_mux(plchan); 1540 if (ret < 0) { 1541 pl08x_free_txd(pl08x, txd); 1542 dev_dbg(&pl08x->adev->dev, 1543 "unable to mux for transfer on %s due to platform restrictions\n", 1544 plchan->name); 1545 return NULL; 1546 } 1547 1548 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 1549 plchan->signal, plchan->name); 1550 1551 /* Assign the flow control signal to this channel */ 1552 if (direction == DMA_MEM_TO_DEV) 1553 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 1554 else 1555 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1556 1557 return txd; 1558 } 1559 1560 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 1561 enum dma_transfer_direction direction, 1562 dma_addr_t slave_addr, 1563 dma_addr_t buf_addr, 1564 unsigned int len) 1565 { 1566 struct pl08x_sg *dsg; 1567 1568 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1569 if (!dsg) 1570 return -ENOMEM; 1571 1572 list_add_tail(&dsg->node, &txd->dsg_list); 1573 1574 dsg->len = len; 1575 if (direction == DMA_MEM_TO_DEV) { 1576 dsg->src_addr = buf_addr; 1577 dsg->dst_addr = slave_addr; 1578 } else { 1579 dsg->src_addr = slave_addr; 1580 dsg->dst_addr = buf_addr; 1581 } 1582 1583 return 0; 1584 } 1585 1586 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1587 struct dma_chan *chan, struct scatterlist *sgl, 1588 unsigned int sg_len, enum dma_transfer_direction direction, 1589 unsigned long flags, void *context) 1590 { 1591 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1592 struct pl08x_driver_data *pl08x = plchan->host; 1593 struct pl08x_txd *txd; 1594 struct scatterlist *sg; 1595 int ret, tmp; 1596 dma_addr_t slave_addr; 1597 1598 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1599 __func__, sg_dma_len(sgl), plchan->name); 1600 1601 txd = pl08x_init_txd(chan, direction, &slave_addr); 1602 if (!txd) 1603 return NULL; 1604 1605 for_each_sg(sgl, sg, sg_len, tmp) { 1606 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1607 sg_dma_address(sg), 1608 sg_dma_len(sg)); 1609 if (ret) { 1610 pl08x_release_mux(plchan); 1611 pl08x_free_txd(pl08x, txd); 1612 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1613 __func__); 1614 return NULL; 1615 } 1616 } 1617 1618 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1619 if (!ret) { 1620 pl08x_release_mux(plchan); 1621 pl08x_free_txd(pl08x, txd); 1622 return NULL; 1623 } 1624 1625 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1626 } 1627 1628 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 1629 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1630 size_t period_len, enum dma_transfer_direction direction, 1631 unsigned long flags) 1632 { 1633 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1634 struct pl08x_driver_data *pl08x = plchan->host; 1635 struct pl08x_txd *txd; 1636 int ret, tmp; 1637 dma_addr_t slave_addr; 1638 1639 dev_dbg(&pl08x->adev->dev, 1640 "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", 1641 __func__, period_len, buf_len, 1642 direction == DMA_MEM_TO_DEV ? 
"to" : "from", 1643 plchan->name); 1644 1645 txd = pl08x_init_txd(chan, direction, &slave_addr); 1646 if (!txd) 1647 return NULL; 1648 1649 txd->cyclic = true; 1650 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 1651 for (tmp = 0; tmp < buf_len; tmp += period_len) { 1652 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1653 buf_addr + tmp, period_len); 1654 if (ret) { 1655 pl08x_release_mux(plchan); 1656 pl08x_free_txd(pl08x, txd); 1657 return NULL; 1658 } 1659 } 1660 1661 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1662 if (!ret) { 1663 pl08x_release_mux(plchan); 1664 pl08x_free_txd(pl08x, txd); 1665 return NULL; 1666 } 1667 1668 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1669 } 1670 1671 static int pl08x_config(struct dma_chan *chan, 1672 struct dma_slave_config *config) 1673 { 1674 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1675 struct pl08x_driver_data *pl08x = plchan->host; 1676 1677 if (!plchan->slave) 1678 return -EINVAL; 1679 1680 /* Reject definitely invalid configurations */ 1681 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 1682 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1683 return -EINVAL; 1684 1685 if (config->device_fc && pl08x->vd->pl080s) { 1686 dev_err(&pl08x->adev->dev, 1687 "%s: PL080S does not support peripheral flow control\n", 1688 __func__); 1689 return -EINVAL; 1690 } 1691 1692 plchan->cfg = *config; 1693 1694 return 0; 1695 } 1696 1697 static int pl08x_terminate_all(struct dma_chan *chan) 1698 { 1699 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1700 struct pl08x_driver_data *pl08x = plchan->host; 1701 unsigned long flags; 1702 1703 spin_lock_irqsave(&plchan->vc.lock, flags); 1704 if (!plchan->phychan && !plchan->at) { 1705 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1706 return 0; 1707 } 1708 1709 plchan->state = PL08X_CHAN_IDLE; 1710 1711 if (plchan->phychan) { 1712 /* 1713 * Mark physical channel as free and free any slave 1714 * signal 1715 */ 1716 pl08x_phy_free(plchan); 1717 } 1718 /* Dequeue jobs and free LLIs */ 1719 if (plchan->at) { 1720 pl08x_desc_free(&plchan->at->vd); 1721 plchan->at = NULL; 1722 } 1723 /* Dequeue jobs not yet fired as well */ 1724 pl08x_free_txd_list(pl08x, plchan); 1725 1726 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1727 1728 return 0; 1729 } 1730 1731 static int pl08x_pause(struct dma_chan *chan) 1732 { 1733 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1734 unsigned long flags; 1735 1736 /* 1737 * Anything succeeds on channels with no physical allocation and 1738 * no queued transfers. 1739 */ 1740 spin_lock_irqsave(&plchan->vc.lock, flags); 1741 if (!plchan->phychan && !plchan->at) { 1742 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1743 return 0; 1744 } 1745 1746 pl08x_pause_phy_chan(plchan->phychan); 1747 plchan->state = PL08X_CHAN_PAUSED; 1748 1749 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1750 1751 return 0; 1752 } 1753 1754 static int pl08x_resume(struct dma_chan *chan) 1755 { 1756 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1757 unsigned long flags; 1758 1759 /* 1760 * Anything succeeds on channels with no physical allocation and 1761 * no queued transfers. 
1762 */ 1763 spin_lock_irqsave(&plchan->vc.lock, flags); 1764 if (!plchan->phychan && !plchan->at) { 1765 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1766 return 0; 1767 } 1768 1769 pl08x_resume_phy_chan(plchan->phychan); 1770 plchan->state = PL08X_CHAN_RUNNING; 1771 1772 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1773 1774 return 0; 1775 } 1776 1777 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1778 { 1779 struct pl08x_dma_chan *plchan; 1780 char *name = chan_id; 1781 1782 /* Reject channels for devices not bound to this driver */ 1783 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1784 return false; 1785 1786 plchan = to_pl08x_chan(chan); 1787 1788 /* Check that the channel is not taken! */ 1789 if (!strcmp(plchan->name, name)) 1790 return true; 1791 1792 return false; 1793 } 1794 EXPORT_SYMBOL_GPL(pl08x_filter_id); 1795 1796 static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id) 1797 { 1798 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1799 1800 return plchan->cd == chan_id; 1801 } 1802 1803 /* 1804 * Just check that the device is there and active 1805 * TODO: turn this bit on/off depending on the number of physical channels 1806 * actually used, if it is zero... well shut it off. That will save some 1807 * power. Cut the clock at the same time. 1808 */ 1809 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1810 { 1811 /* The Nomadik variant does not have the config register */ 1812 if (pl08x->vd->nomadik) 1813 return; 1814 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1815 } 1816 1817 static irqreturn_t pl08x_irq(int irq, void *dev) 1818 { 1819 struct pl08x_driver_data *pl08x = dev; 1820 u32 mask = 0, err, tc, i; 1821 1822 /* check & clear - ERR & TC interrupts */ 1823 err = readl(pl08x->base + PL080_ERR_STATUS); 1824 if (err) { 1825 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1826 __func__, err); 1827 writel(err, pl08x->base + PL080_ERR_CLEAR); 1828 } 1829 tc = readl(pl08x->base + PL080_TC_STATUS); 1830 if (tc) 1831 writel(tc, pl08x->base + PL080_TC_CLEAR); 1832 1833 if (!err && !tc) 1834 return IRQ_NONE; 1835 1836 for (i = 0; i < pl08x->vd->channels; i++) { 1837 if (((1 << i) & err) || ((1 << i) & tc)) { 1838 /* Locate physical channel */ 1839 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1840 struct pl08x_dma_chan *plchan = phychan->serving; 1841 struct pl08x_txd *tx; 1842 1843 if (!plchan) { 1844 dev_err(&pl08x->adev->dev, 1845 "%s Error TC interrupt on unused channel: 0x%08x\n", 1846 __func__, i); 1847 continue; 1848 } 1849 1850 spin_lock(&plchan->vc.lock); 1851 tx = plchan->at; 1852 if (tx && tx->cyclic) { 1853 vchan_cyclic_callback(&tx->vd); 1854 } else if (tx) { 1855 plchan->at = NULL; 1856 /* 1857 * This descriptor is done, release its mux 1858 * reservation. 1859 */ 1860 pl08x_release_mux(plchan); 1861 tx->done = true; 1862 vchan_cookie_complete(&tx->vd); 1863 1864 /* 1865 * And start the next descriptor (if any), 1866 * otherwise free this channel. 1867 */ 1868 if (vchan_next_desc(&plchan->vc)) 1869 pl08x_start_next_txd(plchan); 1870 else 1871 pl08x_phy_free(plchan); 1872 } 1873 spin_unlock(&plchan->vc.lock); 1874 1875 mask |= (1 << i); 1876 } 1877 } 1878 1879 return mask ? 
IRQ_HANDLED : IRQ_NONE; 1880 } 1881 1882 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1883 { 1884 chan->slave = true; 1885 chan->name = chan->cd->bus_id; 1886 chan->cfg.src_addr = chan->cd->addr; 1887 chan->cfg.dst_addr = chan->cd->addr; 1888 } 1889 1890 /* 1891 * Initialise the DMAC memcpy/slave channels. 1892 * Make a local wrapper to hold required data 1893 */ 1894 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1895 struct dma_device *dmadev, unsigned int channels, bool slave) 1896 { 1897 struct pl08x_dma_chan *chan; 1898 int i; 1899 1900 INIT_LIST_HEAD(&dmadev->channels); 1901 1902 /* 1903 * Register as many many memcpy as we have physical channels, 1904 * we won't always be able to use all but the code will have 1905 * to cope with that situation. 1906 */ 1907 for (i = 0; i < channels; i++) { 1908 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1909 if (!chan) 1910 return -ENOMEM; 1911 1912 chan->host = pl08x; 1913 chan->state = PL08X_CHAN_IDLE; 1914 chan->signal = -1; 1915 1916 if (slave) { 1917 chan->cd = &pl08x->pd->slave_channels[i]; 1918 /* 1919 * Some implementations have muxed signals, whereas some 1920 * use a mux in front of the signals and need dynamic 1921 * assignment of signals. 1922 */ 1923 chan->signal = i; 1924 pl08x_dma_slave_init(chan); 1925 } else { 1926 chan->cd = &pl08x->pd->memcpy_channel; 1927 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1928 if (!chan->name) { 1929 kfree(chan); 1930 return -ENOMEM; 1931 } 1932 } 1933 dev_dbg(&pl08x->adev->dev, 1934 "initialize virtual channel \"%s\"\n", 1935 chan->name); 1936 1937 chan->vc.desc_free = pl08x_desc_free; 1938 vchan_init(&chan->vc, dmadev); 1939 } 1940 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1941 i, slave ? "slave" : "memcpy"); 1942 return i; 1943 } 1944 1945 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1946 { 1947 struct pl08x_dma_chan *chan = NULL; 1948 struct pl08x_dma_chan *next; 1949 1950 list_for_each_entry_safe(chan, 1951 next, &dmadev->channels, vc.chan.device_node) { 1952 list_del(&chan->vc.chan.device_node); 1953 kfree(chan); 1954 } 1955 } 1956 1957 #ifdef CONFIG_DEBUG_FS 1958 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1959 { 1960 switch (state) { 1961 case PL08X_CHAN_IDLE: 1962 return "idle"; 1963 case PL08X_CHAN_RUNNING: 1964 return "running"; 1965 case PL08X_CHAN_PAUSED: 1966 return "paused"; 1967 case PL08X_CHAN_WAITING: 1968 return "waiting"; 1969 default: 1970 break; 1971 } 1972 return "UNKNOWN STATE"; 1973 } 1974 1975 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1976 { 1977 struct pl08x_driver_data *pl08x = s->private; 1978 struct pl08x_dma_chan *chan; 1979 struct pl08x_phy_chan *ch; 1980 unsigned long flags; 1981 int i; 1982 1983 seq_printf(s, "PL08x physical channels:\n"); 1984 seq_printf(s, "CHANNEL:\tUSER:\n"); 1985 seq_printf(s, "--------\t-----\n"); 1986 for (i = 0; i < pl08x->vd->channels; i++) { 1987 struct pl08x_dma_chan *virt_chan; 1988 1989 ch = &pl08x->phy_chans[i]; 1990 1991 spin_lock_irqsave(&ch->lock, flags); 1992 virt_chan = ch->serving; 1993 1994 seq_printf(s, "%d\t\t%s%s\n", 1995 ch->id, 1996 virt_chan ? virt_chan->name : "(none)", 1997 ch->locked ? 
" LOCKED" : ""); 1998 1999 spin_unlock_irqrestore(&ch->lock, flags); 2000 } 2001 2002 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 2003 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2004 seq_printf(s, "--------\t------\n"); 2005 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 2006 seq_printf(s, "%s\t\t%s\n", chan->name, 2007 pl08x_state_str(chan->state)); 2008 } 2009 2010 seq_printf(s, "\nPL08x virtual slave channels:\n"); 2011 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2012 seq_printf(s, "--------\t------\n"); 2013 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2014 seq_printf(s, "%s\t\t%s\n", chan->name, 2015 pl08x_state_str(chan->state)); 2016 } 2017 2018 return 0; 2019 } 2020 2021 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 2022 { 2023 return single_open(file, pl08x_debugfs_show, inode->i_private); 2024 } 2025 2026 static const struct file_operations pl08x_debugfs_operations = { 2027 .open = pl08x_debugfs_open, 2028 .read = seq_read, 2029 .llseek = seq_lseek, 2030 .release = single_release, 2031 }; 2032 2033 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2034 { 2035 /* Expose a simple debugfs interface to view all clocks */ 2036 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2037 S_IFREG | S_IRUGO, NULL, pl08x, 2038 &pl08x_debugfs_operations); 2039 } 2040 2041 #else 2042 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2043 { 2044 } 2045 #endif 2046 2047 #ifdef CONFIG_OF 2048 static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, 2049 u32 id) 2050 { 2051 struct pl08x_dma_chan *chan; 2052 2053 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2054 if (chan->signal == id) 2055 return &chan->vc.chan; 2056 } 2057 2058 return NULL; 2059 } 2060 2061 static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec, 2062 struct of_dma *ofdma) 2063 { 2064 struct pl08x_driver_data *pl08x = ofdma->of_dma_data; 2065 struct dma_chan *dma_chan; 2066 struct pl08x_dma_chan *plchan; 2067 2068 if (!pl08x) 2069 return NULL; 2070 2071 if (dma_spec->args_count != 2) { 2072 dev_err(&pl08x->adev->dev, 2073 "DMA channel translation requires two cells\n"); 2074 return NULL; 2075 } 2076 2077 dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]); 2078 if (!dma_chan) { 2079 dev_err(&pl08x->adev->dev, 2080 "DMA slave channel not found\n"); 2081 return NULL; 2082 } 2083 2084 plchan = to_pl08x_chan(dma_chan); 2085 dev_dbg(&pl08x->adev->dev, 2086 "translated channel for signal %d\n", 2087 dma_spec->args[0]); 2088 2089 /* Augment channel data for applicable AHB buses */ 2090 plchan->cd->periph_buses = dma_spec->args[1]; 2091 return dma_get_slave_channel(dma_chan); 2092 } 2093 2094 static int pl08x_of_probe(struct amba_device *adev, 2095 struct pl08x_driver_data *pl08x, 2096 struct device_node *np) 2097 { 2098 struct pl08x_platform_data *pd; 2099 struct pl08x_channel_data *chanp = NULL; 2100 u32 cctl_memcpy = 0; 2101 u32 val; 2102 int ret; 2103 int i; 2104 2105 pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL); 2106 if (!pd) 2107 return -ENOMEM; 2108 2109 /* Eligible bus masters for fetching LLIs */ 2110 if (of_property_read_bool(np, "lli-bus-interface-ahb1")) 2111 pd->lli_buses |= PL08X_AHB1; 2112 if (of_property_read_bool(np, "lli-bus-interface-ahb2")) 2113 pd->lli_buses |= PL08X_AHB2; 2114 if (!pd->lli_buses) { 2115 dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n"); 2116 pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2; 2117 } 2118 
static int pl08x_of_probe(struct amba_device *adev,
			  struct pl08x_driver_data *pl08x,
			  struct device_node *np)
{
	struct pl08x_platform_data *pd;
	struct pl08x_channel_data *chanp = NULL;
	u32 cctl_memcpy = 0;
	u32 val;
	int ret;
	int i;

	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	/* Eligible bus masters for fetching LLIs */
	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
		pd->lli_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
		pd->lli_buses |= PL08X_AHB2;
	if (!pd->lli_buses) {
		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Eligible bus masters for memory access */
	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
		pd->mem_buses |= PL08X_AHB1;
	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
		pd->mem_buses |= PL08X_AHB2;
	if (!pd->mem_buses) {
		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
	}

	/* Parse the memcpy channel properties */
	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
		val = 1;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
		/* Fall through */
	case 1:
		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 4:
		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 8:
		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 64:
		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 128:
		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	case 256:
		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
		break;
	}

	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
	if (ret) {
		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
		val = 8;
	}
	switch (val) {
	default:
		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
		/* Fall through */
	case 8:
		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 16:
		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 32:
		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	}

	/* This is currently the only thing making sense */
	cctl_memcpy |= PL080_CONTROL_PROT_SYS;

	/* Set up memcpy channel */
	pd->memcpy_channel.bus_id = "memcpy";
	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
	/* Use the buses that can access memory, obviously */
	pd->memcpy_channel.periph_buses = pd->mem_buses;
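	/*
	 * Worked example: with the defaults assumed above (burst size 1,
	 * bus width 8 bits), the composed control word is
	 *
	 *	cctl_memcpy = PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
	 *		      PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT |
	 *		      PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
	 *		      PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT |
	 *		      PL080_CONTROL_PROT_SYS;
	 *
	 * i.e. the OR of the branches taken in the two switches plus the
	 * protection bit set just above.
	 */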
	/*
	 * Allocate channel data for all possible slave channels (one
	 * for each possible signal); channels will then be allocated
	 * for a device and have their AHB interfaces set up at
	 * translation time.
	 */
	chanp = devm_kcalloc(&adev->dev,
			     pl08x->vd->signals,
			     sizeof(struct pl08x_channel_data),
			     GFP_KERNEL);
	if (!chanp)
		return -ENOMEM;

	pd->slave_channels = chanp;
	for (i = 0; i < pl08x->vd->signals; i++) {
		/* chanp->periph_buses will be assigned at translation */
		chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
		chanp++;
	}
	pd->num_slave_channels = pl08x->vd->signals;

	pl08x->pd = pd;

	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
					  pl08x);
}
#else
static inline int pl08x_of_probe(struct amba_device *adev,
				 struct pl08x_driver_data *pl08x,
				 struct device_node *np)
{
	return -EINVAL;
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	struct device_node *np = adev->dev.of_node;
	u32 tsfr_size;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Ensure that we can do DMA */
	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto out_no_pl08x;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_config = pl08x_config;
	pl08x->memcpy.device_pause = pl08x_pause;
	pl08x->memcpy.device_resume = pl08x_resume;
	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
	pl08x->slave.device_config = pl08x_config;
	pl08x->slave.device_pause = pl08x_pause;
	pl08x->slave.device_resume = pl08x_resume;
	pl08x->slave.device_terminate_all = pl08x_terminate_all;
	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
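	/*
	 * A minimal sketch (not part of the driver) of how a client could
	 * drive the memcpy engine set up above via the generic dmaengine
	 * API; error handling is elided and chan/dst/src/len are assumed
	 * to have been obtained elsewhere:
	 *
	 *	struct dma_async_tx_descriptor *tx;
	 *	dma_cookie_t cookie;
	 *
	 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
	 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	 *	cookie = dmaengine_submit(tx);
	 *	dma_async_issue_pending(chan);
	 */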
	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		if (np) {
			ret = pl08x_of_probe(adev, pl08x, np);
			if (ret)
				goto out_no_platdata;
		} else {
			dev_err(&adev->dev, "no platform data supplied\n");
			ret = -EINVAL;
			goto out_no_platdata;
		}
	} else {
		pl08x->slave.filter.map = pl08x->pd->slave_map;
		pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
		pl08x->slave.filter.fn = pl08x_filter_fn;
	}

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	if (vd->pl080s)
		pl08x->lli_words = PL080S_LLI_WORDS;
	else
		pl08x->lli_words = PL080_LLI_WORDS;
	tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);

	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      tsfr_size, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts before attaching the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		ch->reg_config = ch->base + vd->config_offset;
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->reg_config);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}
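	/*
	 * On the LLI pool sizing above: a plain PL080/PL081 uses 4-word
	 * LLIs (PL080_LLI_WORDS) and the PL080S 8-word LLIs
	 * (PL080S_LLI_WORDS), so each pool entry is
	 *
	 *	tsfr_size = MAX_NUM_TSFR_LLIS * lli_words * sizeof(u32)
	 *
	 * bytes, i.e. twice as large per descriptor on the PL080S due to
	 * the extra CCTL2 word and the resulting 8-word LLI alignment.
	 */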
"BUSY" : "FREE"); 2398 } 2399 2400 /* Register as many memcpy channels as there are physical channels */ 2401 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2402 pl08x->vd->channels, false); 2403 if (ret <= 0) { 2404 dev_warn(&pl08x->adev->dev, 2405 "%s failed to enumerate memcpy channels - %d\n", 2406 __func__, ret); 2407 goto out_no_memcpy; 2408 } 2409 2410 /* Register slave channels */ 2411 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2412 pl08x->pd->num_slave_channels, true); 2413 if (ret < 0) { 2414 dev_warn(&pl08x->adev->dev, 2415 "%s failed to enumerate slave channels - %d\n", 2416 __func__, ret); 2417 goto out_no_slave; 2418 } 2419 2420 ret = dma_async_device_register(&pl08x->memcpy); 2421 if (ret) { 2422 dev_warn(&pl08x->adev->dev, 2423 "%s failed to register memcpy as an async device - %d\n", 2424 __func__, ret); 2425 goto out_no_memcpy_reg; 2426 } 2427 2428 ret = dma_async_device_register(&pl08x->slave); 2429 if (ret) { 2430 dev_warn(&pl08x->adev->dev, 2431 "%s failed to register slave as an async device - %d\n", 2432 __func__, ret); 2433 goto out_no_slave_reg; 2434 } 2435 2436 amba_set_drvdata(adev, pl08x); 2437 init_pl08x_debugfs(pl08x); 2438 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2439 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2440 (unsigned long long)adev->res.start, adev->irq[0]); 2441 2442 return 0; 2443 2444 out_no_slave_reg: 2445 dma_async_device_unregister(&pl08x->memcpy); 2446 out_no_memcpy_reg: 2447 pl08x_free_virtual_channels(&pl08x->slave); 2448 out_no_slave: 2449 pl08x_free_virtual_channels(&pl08x->memcpy); 2450 out_no_memcpy: 2451 kfree(pl08x->phy_chans); 2452 out_no_phychans: 2453 free_irq(adev->irq[0], pl08x); 2454 out_no_irq: 2455 iounmap(pl08x->base); 2456 out_no_ioremap: 2457 dma_pool_destroy(pl08x->pool); 2458 out_no_lli_pool: 2459 out_no_platdata: 2460 kfree(pl08x); 2461 out_no_pl08x: 2462 amba_release_regions(adev); 2463 return ret; 2464 } 2465 2466 /* PL080 has 8 channels and the PL080 have just 2 */ 2467 static struct vendor_data vendor_pl080 = { 2468 .config_offset = PL080_CH_CONFIG, 2469 .channels = 8, 2470 .signals = 16, 2471 .dualmaster = true, 2472 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2473 }; 2474 2475 static struct vendor_data vendor_nomadik = { 2476 .config_offset = PL080_CH_CONFIG, 2477 .channels = 8, 2478 .signals = 32, 2479 .dualmaster = true, 2480 .nomadik = true, 2481 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2482 }; 2483 2484 static struct vendor_data vendor_pl080s = { 2485 .config_offset = PL080S_CH_CONFIG, 2486 .channels = 8, 2487 .signals = 32, 2488 .pl080s = true, 2489 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2490 }; 2491 2492 static struct vendor_data vendor_pl081 = { 2493 .config_offset = PL080_CH_CONFIG, 2494 .channels = 2, 2495 .signals = 16, 2496 .dualmaster = false, 2497 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2498 }; 2499 2500 static struct amba_id pl08x_ids[] = { 2501 /* Samsung PL080S variant */ 2502 { 2503 .id = 0x0a141080, 2504 .mask = 0xffffffff, 2505 .data = &vendor_pl080s, 2506 }, 2507 /* PL080 */ 2508 { 2509 .id = 0x00041080, 2510 .mask = 0x000fffff, 2511 .data = &vendor_pl080, 2512 }, 2513 /* PL081 */ 2514 { 2515 .id = 0x00041081, 2516 .mask = 0x000fffff, 2517 .data = &vendor_pl081, 2518 }, 2519 /* Nomadik 8815 PL080 variant */ 2520 { 2521 .id = 0x00280080, 2522 .mask = 0x00ffffff, 2523 .data = &vendor_nomadik, 2524 }, 2525 { 0, 0 }, 2526 }; 2527 2528 MODULE_DEVICE_TABLE(amba, 
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);