1 /* 2 * Driver for the Cirrus Logic EP93xx DMA Controller 3 * 4 * Copyright (C) 2011 Mika Westerberg 5 * 6 * DMA M2P implementation is based on the original 7 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights: 8 * 9 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> 10 * Copyright (C) 2006 Applied Data Systems 11 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com> 12 * 13 * This driver is based on dw_dmac and amba-pl08x drivers. 14 * 15 * This program is free software; you can redistribute it and/or modify 16 * it under the terms of the GNU General Public License as published by 17 * the Free Software Foundation; either version 2 of the License, or 18 * (at your option) any later version. 19 */ 20 21 #include <linux/clk.h> 22 #include <linux/init.h> 23 #include <linux/interrupt.h> 24 #include <linux/dmaengine.h> 25 #include <linux/module.h> 26 #include <linux/platform_device.h> 27 #include <linux/slab.h> 28 29 #include <mach/dma.h> 30 31 /* M2P registers */ 32 #define M2P_CONTROL 0x0000 33 #define M2P_CONTROL_STALLINT BIT(0) 34 #define M2P_CONTROL_NFBINT BIT(1) 35 #define M2P_CONTROL_CH_ERROR_INT BIT(3) 36 #define M2P_CONTROL_ENABLE BIT(4) 37 #define M2P_CONTROL_ICE BIT(6) 38 39 #define M2P_INTERRUPT 0x0004 40 #define M2P_INTERRUPT_STALL BIT(0) 41 #define M2P_INTERRUPT_NFB BIT(1) 42 #define M2P_INTERRUPT_ERROR BIT(3) 43 44 #define M2P_PPALLOC 0x0008 45 #define M2P_STATUS 0x000c 46 47 #define M2P_MAXCNT0 0x0020 48 #define M2P_BASE0 0x0024 49 #define M2P_MAXCNT1 0x0030 50 #define M2P_BASE1 0x0034 51 52 #define M2P_STATE_IDLE 0 53 #define M2P_STATE_STALL 1 54 #define M2P_STATE_ON 2 55 #define M2P_STATE_NEXT 3 56 57 /* M2M registers */ 58 #define M2M_CONTROL 0x0000 59 #define M2M_CONTROL_DONEINT BIT(2) 60 #define M2M_CONTROL_ENABLE BIT(3) 61 #define M2M_CONTROL_START BIT(4) 62 #define M2M_CONTROL_DAH BIT(11) 63 #define M2M_CONTROL_SAH BIT(12) 64 #define M2M_CONTROL_PW_SHIFT 9 65 #define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT) 66 #define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT) 67 #define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT) 68 #define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT) 69 #define M2M_CONTROL_TM_SHIFT 13 70 #define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) 71 #define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) 72 #define M2M_CONTROL_RSS_SHIFT 22 73 #define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) 74 #define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) 75 #define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT) 76 #define M2M_CONTROL_NO_HDSK BIT(24) 77 #define M2M_CONTROL_PWSC_SHIFT 25 78 79 #define M2M_INTERRUPT 0x0004 80 #define M2M_INTERRUPT_DONEINT BIT(1) 81 82 #define M2M_BCR0 0x0010 83 #define M2M_BCR1 0x0014 84 #define M2M_SAR_BASE0 0x0018 85 #define M2M_SAR_BASE1 0x001c 86 #define M2M_DAR_BASE0 0x002c 87 #define M2M_DAR_BASE1 0x0030 88 89 #define DMA_MAX_CHAN_BYTES 0xffff 90 #define DMA_MAX_CHAN_DESCRIPTORS 32 91 92 struct ep93xx_dma_engine; 93 94 /** 95 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor 96 * @src_addr: source address of the transaction 97 * @dst_addr: destination address of the transaction 98 * @size: size of the transaction (in bytes) 99 * @complete: this descriptor is completed 100 * @txd: dmaengine API descriptor 101 * @tx_list: list of linked descriptors 102 * @node: link used for putting this into a channel queue 103 */ 104 struct ep93xx_dma_desc { 105 u32 src_addr; 106 u32 dst_addr; 107 size_t size; 108 bool complete; 109 struct dma_async_tx_descriptor txd; 110 struct list_head tx_list; 111 struct list_head node; 112 }; 113 114 /** 115 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel 116 * @chan: dmaengine API channel 117 * @edma: pointer to to the engine device 118 * @regs: memory mapped registers 119 * @irq: interrupt number of the channel 120 * @clk: clock used by this channel 121 * @tasklet: channel specific tasklet used for callbacks 122 * @lock: lock protecting the fields following 123 * @flags: flags for the channel 124 * @buffer: which buffer to use next (0/1) 125 * @last_completed: last completed cookie value 126 * @active: flattened chain of descriptors currently being processed 127 * @queue: pending descriptors which are handled next 128 * @free_list: list of free descriptors which can be used 129 * @runtime_addr: physical address currently used as dest/src (M2M only). This 130 * is set via %DMA_SLAVE_CONFIG before slave operation is 131 * prepared 132 * @runtime_ctrl: M2M runtime values for the control register. 133 * 134 * As EP93xx DMA controller doesn't support real chained DMA descriptors we 135 * will have slightly different scheme here: @active points to a head of 136 * flattened DMA descriptor chain. 137 * 138 * @queue holds pending transactions. These are linked through the first 139 * descriptor in the chain. When a descriptor is moved to the @active queue, 140 * the first and chained descriptors are flattened into a single list. 141 * 142 * @chan.private holds pointer to &struct ep93xx_dma_data which contains 143 * necessary channel configuration information. For memcpy channels this must 144 * be %NULL. 145 */ 146 struct ep93xx_dma_chan { 147 struct dma_chan chan; 148 const struct ep93xx_dma_engine *edma; 149 void __iomem *regs; 150 int irq; 151 struct clk *clk; 152 struct tasklet_struct tasklet; 153 /* protects the fields following */ 154 spinlock_t lock; 155 unsigned long flags; 156 /* Channel is configured for cyclic transfers */ 157 #define EP93XX_DMA_IS_CYCLIC 0 158 159 int buffer; 160 dma_cookie_t last_completed; 161 struct list_head active; 162 struct list_head queue; 163 struct list_head free_list; 164 u32 runtime_addr; 165 u32 runtime_ctrl; 166 }; 167 168 /** 169 * struct ep93xx_dma_engine - the EP93xx DMA engine instance 170 * @dma_dev: holds the dmaengine device 171 * @m2m: is this an M2M or M2P device 172 * @hw_setup: method which sets the channel up for operation 173 * @hw_shutdown: shuts the channel down and flushes whatever is left 174 * @hw_submit: pushes active descriptor(s) to the hardware 175 * @hw_interrupt: handle the interrupt 176 * @num_channels: number of channels for this instance 177 * @channels: array of channels 178 * 179 * There is one instance of this struct for the M2P channels and one for the 180 * M2M channels. hw_xxx() methods are used to perform operations which are 181 * different on M2M and M2P channels. These methods are called with channel 182 * lock held and interrupts disabled so they cannot sleep. 183 */ 184 struct ep93xx_dma_engine { 185 struct dma_device dma_dev; 186 bool m2m; 187 int (*hw_setup)(struct ep93xx_dma_chan *); 188 void (*hw_shutdown)(struct ep93xx_dma_chan *); 189 void (*hw_submit)(struct ep93xx_dma_chan *); 190 int (*hw_interrupt)(struct ep93xx_dma_chan *); 191 #define INTERRUPT_UNKNOWN 0 192 #define INTERRUPT_DONE 1 193 #define INTERRUPT_NEXT_BUFFER 2 194 195 size_t num_channels; 196 struct ep93xx_dma_chan channels[]; 197 }; 198 199 static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac) 200 { 201 return &edmac->chan.dev->device; 202 } 203 204 static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan) 205 { 206 return container_of(chan, struct ep93xx_dma_chan, chan); 207 } 208 209 /** 210 * ep93xx_dma_set_active - set new active descriptor chain 211 * @edmac: channel 212 * @desc: head of the new active descriptor chain 213 * 214 * Sets @desc to be the head of the new active descriptor chain. This is the 215 * chain which is processed next. The active list must be empty before calling 216 * this function. 217 * 218 * Called with @edmac->lock held and interrupts disabled. 219 */ 220 static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, 221 struct ep93xx_dma_desc *desc) 222 { 223 BUG_ON(!list_empty(&edmac->active)); 224 225 list_add_tail(&desc->node, &edmac->active); 226 227 /* Flatten the @desc->tx_list chain into @edmac->active list */ 228 while (!list_empty(&desc->tx_list)) { 229 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, 230 struct ep93xx_dma_desc, node); 231 232 /* 233 * We copy the callback parameters from the first descriptor 234 * to all the chained descriptors. This way we can call the 235 * callback without having to find out the first descriptor in 236 * the chain. Useful for cyclic transfers. 237 */ 238 d->txd.callback = desc->txd.callback; 239 d->txd.callback_param = desc->txd.callback_param; 240 241 list_move_tail(&d->node, &edmac->active); 242 } 243 } 244 245 /* Called with @edmac->lock held and interrupts disabled */ 246 static struct ep93xx_dma_desc * 247 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) 248 { 249 if (list_empty(&edmac->active)) 250 return NULL; 251 252 return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); 253 } 254 255 /** 256 * ep93xx_dma_advance_active - advances to the next active descriptor 257 * @edmac: channel 258 * 259 * Function advances active descriptor to the next in the @edmac->active and 260 * returns %true if we still have descriptors in the chain to process. 261 * Otherwise returns %false. 262 * 263 * When the channel is in cyclic mode always returns %true. 264 * 265 * Called with @edmac->lock held and interrupts disabled. 266 */ 267 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) 268 { 269 struct ep93xx_dma_desc *desc; 270 271 list_rotate_left(&edmac->active); 272 273 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) 274 return true; 275 276 desc = ep93xx_dma_get_active(edmac); 277 if (!desc) 278 return false; 279 280 /* 281 * If txd.cookie is set it means that we are back in the first 282 * descriptor in the chain and hence done with it. 283 */ 284 return !desc->txd.cookie; 285 } 286 287 /* 288 * M2P DMA implementation 289 */ 290 291 static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control) 292 { 293 writel(control, edmac->regs + M2P_CONTROL); 294 /* 295 * EP93xx User's Guide states that we must perform a dummy read after 296 * write to the control register. 297 */ 298 readl(edmac->regs + M2P_CONTROL); 299 } 300 301 static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) 302 { 303 struct ep93xx_dma_data *data = edmac->chan.private; 304 u32 control; 305 306 writel(data->port & 0xf, edmac->regs + M2P_PPALLOC); 307 308 control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE 309 | M2P_CONTROL_ENABLE; 310 m2p_set_control(edmac, control); 311 312 return 0; 313 } 314 315 static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) 316 { 317 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; 318 } 319 320 static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) 321 { 322 u32 control; 323 324 control = readl(edmac->regs + M2P_CONTROL); 325 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); 326 m2p_set_control(edmac, control); 327 328 while (m2p_channel_state(edmac) >= M2P_STATE_ON) 329 cpu_relax(); 330 331 m2p_set_control(edmac, 0); 332 333 while (m2p_channel_state(edmac) == M2P_STATE_STALL) 334 cpu_relax(); 335 } 336 337 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) 338 { 339 struct ep93xx_dma_desc *desc; 340 u32 bus_addr; 341 342 desc = ep93xx_dma_get_active(edmac); 343 if (!desc) { 344 dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n"); 345 return; 346 } 347 348 if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) 349 bus_addr = desc->src_addr; 350 else 351 bus_addr = desc->dst_addr; 352 353 if (edmac->buffer == 0) { 354 writel(desc->size, edmac->regs + M2P_MAXCNT0); 355 writel(bus_addr, edmac->regs + M2P_BASE0); 356 } else { 357 writel(desc->size, edmac->regs + M2P_MAXCNT1); 358 writel(bus_addr, edmac->regs + M2P_BASE1); 359 } 360 361 edmac->buffer ^= 1; 362 } 363 364 static void m2p_hw_submit(struct ep93xx_dma_chan *edmac) 365 { 366 u32 control = readl(edmac->regs + M2P_CONTROL); 367 368 m2p_fill_desc(edmac); 369 control |= M2P_CONTROL_STALLINT; 370 371 if (ep93xx_dma_advance_active(edmac)) { 372 m2p_fill_desc(edmac); 373 control |= M2P_CONTROL_NFBINT; 374 } 375 376 m2p_set_control(edmac, control); 377 } 378 379 static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac) 380 { 381 u32 irq_status = readl(edmac->regs + M2P_INTERRUPT); 382 u32 control; 383 384 if (irq_status & M2P_INTERRUPT_ERROR) { 385 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); 386 387 /* Clear the error interrupt */ 388 writel(1, edmac->regs + M2P_INTERRUPT); 389 390 /* 391 * It seems that there is no easy way of reporting errors back 392 * to client so we just report the error here and continue as 393 * usual. 394 * 395 * Revisit this when there is a mechanism to report back the 396 * errors. 397 */ 398 dev_err(chan2dev(edmac), 399 "DMA transfer failed! Details:\n" 400 "\tcookie : %d\n" 401 "\tsrc_addr : 0x%08x\n" 402 "\tdst_addr : 0x%08x\n" 403 "\tsize : %zu\n", 404 desc->txd.cookie, desc->src_addr, desc->dst_addr, 405 desc->size); 406 } 407 408 switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) { 409 case M2P_INTERRUPT_STALL: 410 /* Disable interrupts */ 411 control = readl(edmac->regs + M2P_CONTROL); 412 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); 413 m2p_set_control(edmac, control); 414 415 return INTERRUPT_DONE; 416 417 case M2P_INTERRUPT_NFB: 418 if (ep93xx_dma_advance_active(edmac)) 419 m2p_fill_desc(edmac); 420 421 return INTERRUPT_NEXT_BUFFER; 422 } 423 424 return INTERRUPT_UNKNOWN; 425 } 426 427 /* 428 * M2M DMA implementation 429 * 430 * For the M2M transfers we don't use NFB at all. This is because it simply 431 * doesn't work well with memcpy transfers. When you submit both buffers it is 432 * extremely unlikely that you get an NFB interrupt, but it instead reports 433 * DONE interrupt and both buffers are already transferred which means that we 434 * weren't able to update the next buffer. 435 * 436 * So for now we "simulate" NFB by just submitting buffer after buffer 437 * without double buffering. 438 */ 439 440 static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) 441 { 442 const struct ep93xx_dma_data *data = edmac->chan.private; 443 u32 control = 0; 444 445 if (!data) { 446 /* This is memcpy channel, nothing to configure */ 447 writel(control, edmac->regs + M2M_CONTROL); 448 return 0; 449 } 450 451 switch (data->port) { 452 case EP93XX_DMA_SSP: 453 /* 454 * This was found via experimenting - anything less than 5 455 * causes the channel to perform only a partial transfer which 456 * leads to problems since we don't get DONE interrupt then. 457 */ 458 control = (5 << M2M_CONTROL_PWSC_SHIFT); 459 control |= M2M_CONTROL_NO_HDSK; 460 461 if (data->direction == DMA_MEM_TO_DEV) { 462 control |= M2M_CONTROL_DAH; 463 control |= M2M_CONTROL_TM_TX; 464 control |= M2M_CONTROL_RSS_SSPTX; 465 } else { 466 control |= M2M_CONTROL_SAH; 467 control |= M2M_CONTROL_TM_RX; 468 control |= M2M_CONTROL_RSS_SSPRX; 469 } 470 break; 471 472 case EP93XX_DMA_IDE: 473 /* 474 * This IDE part is totally untested. Values below are taken 475 * from the EP93xx Users's Guide and might not be correct. 476 */ 477 if (data->direction == DMA_MEM_TO_DEV) { 478 /* Worst case from the UG */ 479 control = (3 << M2M_CONTROL_PWSC_SHIFT); 480 control |= M2M_CONTROL_DAH; 481 control |= M2M_CONTROL_TM_TX; 482 } else { 483 control = (2 << M2M_CONTROL_PWSC_SHIFT); 484 control |= M2M_CONTROL_SAH; 485 control |= M2M_CONTROL_TM_RX; 486 } 487 488 control |= M2M_CONTROL_NO_HDSK; 489 control |= M2M_CONTROL_RSS_IDE; 490 control |= M2M_CONTROL_PW_16; 491 break; 492 493 default: 494 return -EINVAL; 495 } 496 497 writel(control, edmac->regs + M2M_CONTROL); 498 return 0; 499 } 500 501 static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) 502 { 503 /* Just disable the channel */ 504 writel(0, edmac->regs + M2M_CONTROL); 505 } 506 507 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) 508 { 509 struct ep93xx_dma_desc *desc; 510 511 desc = ep93xx_dma_get_active(edmac); 512 if (!desc) { 513 dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n"); 514 return; 515 } 516 517 if (edmac->buffer == 0) { 518 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); 519 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); 520 writel(desc->size, edmac->regs + M2M_BCR0); 521 } else { 522 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); 523 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); 524 writel(desc->size, edmac->regs + M2M_BCR1); 525 } 526 527 edmac->buffer ^= 1; 528 } 529 530 static void m2m_hw_submit(struct ep93xx_dma_chan *edmac) 531 { 532 struct ep93xx_dma_data *data = edmac->chan.private; 533 u32 control = readl(edmac->regs + M2M_CONTROL); 534 535 /* 536 * Since we allow clients to configure PW (peripheral width) we always 537 * clear PW bits here and then set them according what is given in 538 * the runtime configuration. 539 */ 540 control &= ~M2M_CONTROL_PW_MASK; 541 control |= edmac->runtime_ctrl; 542 543 m2m_fill_desc(edmac); 544 control |= M2M_CONTROL_DONEINT; 545 546 /* 547 * Now we can finally enable the channel. For M2M channel this must be 548 * done _after_ the BCRx registers are programmed. 549 */ 550 control |= M2M_CONTROL_ENABLE; 551 writel(control, edmac->regs + M2M_CONTROL); 552 553 if (!data) { 554 /* 555 * For memcpy channels the software trigger must be asserted 556 * in order to start the memcpy operation. 557 */ 558 control |= M2M_CONTROL_START; 559 writel(control, edmac->regs + M2M_CONTROL); 560 } 561 } 562 563 static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) 564 { 565 u32 control; 566 567 if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) 568 return INTERRUPT_UNKNOWN; 569 570 /* Clear the DONE bit */ 571 writel(0, edmac->regs + M2M_INTERRUPT); 572 573 /* Disable interrupts and the channel */ 574 control = readl(edmac->regs + M2M_CONTROL); 575 control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); 576 writel(control, edmac->regs + M2M_CONTROL); 577 578 /* 579 * Since we only get DONE interrupt we have to find out ourselves 580 * whether there still is something to process. So we try to advance 581 * the chain an see whether it succeeds. 582 */ 583 if (ep93xx_dma_advance_active(edmac)) { 584 edmac->edma->hw_submit(edmac); 585 return INTERRUPT_NEXT_BUFFER; 586 } 587 588 return INTERRUPT_DONE; 589 } 590 591 /* 592 * DMA engine API implementation 593 */ 594 595 static struct ep93xx_dma_desc * 596 ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac) 597 { 598 struct ep93xx_dma_desc *desc, *_desc; 599 struct ep93xx_dma_desc *ret = NULL; 600 unsigned long flags; 601 602 spin_lock_irqsave(&edmac->lock, flags); 603 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { 604 if (async_tx_test_ack(&desc->txd)) { 605 list_del_init(&desc->node); 606 607 /* Re-initialize the descriptor */ 608 desc->src_addr = 0; 609 desc->dst_addr = 0; 610 desc->size = 0; 611 desc->complete = false; 612 desc->txd.cookie = 0; 613 desc->txd.callback = NULL; 614 desc->txd.callback_param = NULL; 615 616 ret = desc; 617 break; 618 } 619 } 620 spin_unlock_irqrestore(&edmac->lock, flags); 621 return ret; 622 } 623 624 static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, 625 struct ep93xx_dma_desc *desc) 626 { 627 if (desc) { 628 unsigned long flags; 629 630 spin_lock_irqsave(&edmac->lock, flags); 631 list_splice_init(&desc->tx_list, &edmac->free_list); 632 list_add(&desc->node, &edmac->free_list); 633 spin_unlock_irqrestore(&edmac->lock, flags); 634 } 635 } 636 637 /** 638 * ep93xx_dma_advance_work - start processing the next pending transaction 639 * @edmac: channel 640 * 641 * If we have pending transactions queued and we are currently idling, this 642 * function takes the next queued transaction from the @edmac->queue and 643 * pushes it to the hardware for execution. 644 */ 645 static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) 646 { 647 struct ep93xx_dma_desc *new; 648 unsigned long flags; 649 650 spin_lock_irqsave(&edmac->lock, flags); 651 if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) { 652 spin_unlock_irqrestore(&edmac->lock, flags); 653 return; 654 } 655 656 /* Take the next descriptor from the pending queue */ 657 new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node); 658 list_del_init(&new->node); 659 660 ep93xx_dma_set_active(edmac, new); 661 662 /* Push it to the hardware */ 663 edmac->edma->hw_submit(edmac); 664 spin_unlock_irqrestore(&edmac->lock, flags); 665 } 666 667 static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) 668 { 669 struct device *dev = desc->txd.chan->device->dev; 670 671 if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 672 if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 673 dma_unmap_single(dev, desc->src_addr, desc->size, 674 DMA_TO_DEVICE); 675 else 676 dma_unmap_page(dev, desc->src_addr, desc->size, 677 DMA_TO_DEVICE); 678 } 679 if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 680 if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 681 dma_unmap_single(dev, desc->dst_addr, desc->size, 682 DMA_FROM_DEVICE); 683 else 684 dma_unmap_page(dev, desc->dst_addr, desc->size, 685 DMA_FROM_DEVICE); 686 } 687 } 688 689 static void ep93xx_dma_tasklet(unsigned long data) 690 { 691 struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; 692 struct ep93xx_dma_desc *desc, *d; 693 dma_async_tx_callback callback = NULL; 694 void *callback_param = NULL; 695 LIST_HEAD(list); 696 697 spin_lock_irq(&edmac->lock); 698 /* 699 * If dma_terminate_all() was called before we get to run, the active 700 * list has become empty. If that happens we aren't supposed to do 701 * anything more than call ep93xx_dma_advance_work(). 702 */ 703 desc = ep93xx_dma_get_active(edmac); 704 if (desc) { 705 if (desc->complete) { 706 edmac->last_completed = desc->txd.cookie; 707 list_splice_init(&edmac->active, &list); 708 } 709 callback = desc->txd.callback; 710 callback_param = desc->txd.callback_param; 711 } 712 spin_unlock_irq(&edmac->lock); 713 714 /* Pick up the next descriptor from the queue */ 715 ep93xx_dma_advance_work(edmac); 716 717 /* Now we can release all the chained descriptors */ 718 list_for_each_entry_safe(desc, d, &list, node) { 719 /* 720 * For the memcpy channels the API requires us to unmap the 721 * buffers unless requested otherwise. 722 */ 723 if (!edmac->chan.private) 724 ep93xx_dma_unmap_buffers(desc); 725 726 ep93xx_dma_desc_put(edmac, desc); 727 } 728 729 if (callback) 730 callback(callback_param); 731 } 732 733 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) 734 { 735 struct ep93xx_dma_chan *edmac = dev_id; 736 struct ep93xx_dma_desc *desc; 737 irqreturn_t ret = IRQ_HANDLED; 738 739 spin_lock(&edmac->lock); 740 741 desc = ep93xx_dma_get_active(edmac); 742 if (!desc) { 743 dev_warn(chan2dev(edmac), 744 "got interrupt while active list is empty\n"); 745 spin_unlock(&edmac->lock); 746 return IRQ_NONE; 747 } 748 749 switch (edmac->edma->hw_interrupt(edmac)) { 750 case INTERRUPT_DONE: 751 desc->complete = true; 752 tasklet_schedule(&edmac->tasklet); 753 break; 754 755 case INTERRUPT_NEXT_BUFFER: 756 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) 757 tasklet_schedule(&edmac->tasklet); 758 break; 759 760 default: 761 dev_warn(chan2dev(edmac), "unknown interrupt!\n"); 762 ret = IRQ_NONE; 763 break; 764 } 765 766 spin_unlock(&edmac->lock); 767 return ret; 768 } 769 770 /** 771 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed 772 * @tx: descriptor to be executed 773 * 774 * Function will execute given descriptor on the hardware or if the hardware 775 * is busy, queue the descriptor to be executed later on. Returns cookie which 776 * can be used to poll the status of the descriptor. 777 */ 778 static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 779 { 780 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan); 781 struct ep93xx_dma_desc *desc; 782 dma_cookie_t cookie; 783 unsigned long flags; 784 785 spin_lock_irqsave(&edmac->lock, flags); 786 787 cookie = edmac->chan.cookie; 788 789 if (++cookie < 0) 790 cookie = 1; 791 792 desc = container_of(tx, struct ep93xx_dma_desc, txd); 793 794 edmac->chan.cookie = cookie; 795 desc->txd.cookie = cookie; 796 797 /* 798 * If nothing is currently prosessed, we push this descriptor 799 * directly to the hardware. Otherwise we put the descriptor 800 * to the pending queue. 801 */ 802 if (list_empty(&edmac->active)) { 803 ep93xx_dma_set_active(edmac, desc); 804 edmac->edma->hw_submit(edmac); 805 } else { 806 list_add_tail(&desc->node, &edmac->queue); 807 } 808 809 spin_unlock_irqrestore(&edmac->lock, flags); 810 return cookie; 811 } 812 813 /** 814 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel 815 * @chan: channel to allocate resources 816 * 817 * Function allocates necessary resources for the given DMA channel and 818 * returns number of allocated descriptors for the channel. Negative errno 819 * is returned in case of failure. 820 */ 821 static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) 822 { 823 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 824 struct ep93xx_dma_data *data = chan->private; 825 const char *name = dma_chan_name(chan); 826 int ret, i; 827 828 /* Sanity check the channel parameters */ 829 if (!edmac->edma->m2m) { 830 if (!data) 831 return -EINVAL; 832 if (data->port < EP93XX_DMA_I2S1 || 833 data->port > EP93XX_DMA_IRDA) 834 return -EINVAL; 835 if (data->direction != ep93xx_dma_chan_direction(chan)) 836 return -EINVAL; 837 } else { 838 if (data) { 839 switch (data->port) { 840 case EP93XX_DMA_SSP: 841 case EP93XX_DMA_IDE: 842 if (data->direction != DMA_MEM_TO_DEV && 843 data->direction != DMA_DEV_TO_MEM) 844 return -EINVAL; 845 break; 846 default: 847 return -EINVAL; 848 } 849 } 850 } 851 852 if (data && data->name) 853 name = data->name; 854 855 ret = clk_enable(edmac->clk); 856 if (ret) 857 return ret; 858 859 ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac); 860 if (ret) 861 goto fail_clk_disable; 862 863 spin_lock_irq(&edmac->lock); 864 edmac->last_completed = 1; 865 edmac->chan.cookie = 1; 866 ret = edmac->edma->hw_setup(edmac); 867 spin_unlock_irq(&edmac->lock); 868 869 if (ret) 870 goto fail_free_irq; 871 872 for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) { 873 struct ep93xx_dma_desc *desc; 874 875 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 876 if (!desc) { 877 dev_warn(chan2dev(edmac), "not enough descriptors\n"); 878 break; 879 } 880 881 INIT_LIST_HEAD(&desc->tx_list); 882 883 dma_async_tx_descriptor_init(&desc->txd, chan); 884 desc->txd.flags = DMA_CTRL_ACK; 885 desc->txd.tx_submit = ep93xx_dma_tx_submit; 886 887 ep93xx_dma_desc_put(edmac, desc); 888 } 889 890 return i; 891 892 fail_free_irq: 893 free_irq(edmac->irq, edmac); 894 fail_clk_disable: 895 clk_disable(edmac->clk); 896 897 return ret; 898 } 899 900 /** 901 * ep93xx_dma_free_chan_resources - release resources for the channel 902 * @chan: channel 903 * 904 * Function releases all the resources allocated for the given channel. 905 * The channel must be idle when this is called. 906 */ 907 static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) 908 { 909 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 910 struct ep93xx_dma_desc *desc, *d; 911 unsigned long flags; 912 LIST_HEAD(list); 913 914 BUG_ON(!list_empty(&edmac->active)); 915 BUG_ON(!list_empty(&edmac->queue)); 916 917 spin_lock_irqsave(&edmac->lock, flags); 918 edmac->edma->hw_shutdown(edmac); 919 edmac->runtime_addr = 0; 920 edmac->runtime_ctrl = 0; 921 edmac->buffer = 0; 922 list_splice_init(&edmac->free_list, &list); 923 spin_unlock_irqrestore(&edmac->lock, flags); 924 925 list_for_each_entry_safe(desc, d, &list, node) 926 kfree(desc); 927 928 clk_disable(edmac->clk); 929 free_irq(edmac->irq, edmac); 930 } 931 932 /** 933 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation 934 * @chan: channel 935 * @dest: destination bus address 936 * @src: source bus address 937 * @len: size of the transaction 938 * @flags: flags for the descriptor 939 * 940 * Returns a valid DMA descriptor or %NULL in case of failure. 941 */ 942 static struct dma_async_tx_descriptor * 943 ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, 944 dma_addr_t src, size_t len, unsigned long flags) 945 { 946 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 947 struct ep93xx_dma_desc *desc, *first; 948 size_t bytes, offset; 949 950 first = NULL; 951 for (offset = 0; offset < len; offset += bytes) { 952 desc = ep93xx_dma_desc_get(edmac); 953 if (!desc) { 954 dev_warn(chan2dev(edmac), "couln't get descriptor\n"); 955 goto fail; 956 } 957 958 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES); 959 960 desc->src_addr = src + offset; 961 desc->dst_addr = dest + offset; 962 desc->size = bytes; 963 964 if (!first) 965 first = desc; 966 else 967 list_add_tail(&desc->node, &first->tx_list); 968 } 969 970 first->txd.cookie = -EBUSY; 971 first->txd.flags = flags; 972 973 return &first->txd; 974 fail: 975 ep93xx_dma_desc_put(edmac, first); 976 return NULL; 977 } 978 979 /** 980 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation 981 * @chan: channel 982 * @sgl: list of buffers to transfer 983 * @sg_len: number of entries in @sgl 984 * @dir: direction of tha DMA transfer 985 * @flags: flags for the descriptor 986 * 987 * Returns a valid DMA descriptor or %NULL in case of failure. 988 */ 989 static struct dma_async_tx_descriptor * 990 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 991 unsigned int sg_len, enum dma_transfer_direction dir, 992 unsigned long flags) 993 { 994 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 995 struct ep93xx_dma_desc *desc, *first; 996 struct scatterlist *sg; 997 int i; 998 999 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { 1000 dev_warn(chan2dev(edmac), 1001 "channel was configured with different direction\n"); 1002 return NULL; 1003 } 1004 1005 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { 1006 dev_warn(chan2dev(edmac), 1007 "channel is already used for cyclic transfers\n"); 1008 return NULL; 1009 } 1010 1011 first = NULL; 1012 for_each_sg(sgl, sg, sg_len, i) { 1013 size_t sg_len = sg_dma_len(sg); 1014 1015 if (sg_len > DMA_MAX_CHAN_BYTES) { 1016 dev_warn(chan2dev(edmac), "too big transfer size %d\n", 1017 sg_len); 1018 goto fail; 1019 } 1020 1021 desc = ep93xx_dma_desc_get(edmac); 1022 if (!desc) { 1023 dev_warn(chan2dev(edmac), "couln't get descriptor\n"); 1024 goto fail; 1025 } 1026 1027 if (dir == DMA_MEM_TO_DEV) { 1028 desc->src_addr = sg_dma_address(sg); 1029 desc->dst_addr = edmac->runtime_addr; 1030 } else { 1031 desc->src_addr = edmac->runtime_addr; 1032 desc->dst_addr = sg_dma_address(sg); 1033 } 1034 desc->size = sg_len; 1035 1036 if (!first) 1037 first = desc; 1038 else 1039 list_add_tail(&desc->node, &first->tx_list); 1040 } 1041 1042 first->txd.cookie = -EBUSY; 1043 first->txd.flags = flags; 1044 1045 return &first->txd; 1046 1047 fail: 1048 ep93xx_dma_desc_put(edmac, first); 1049 return NULL; 1050 } 1051 1052 /** 1053 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation 1054 * @chan: channel 1055 * @dma_addr: DMA mapped address of the buffer 1056 * @buf_len: length of the buffer (in bytes) 1057 * @period_len: lenght of a single period 1058 * @dir: direction of the operation 1059 * 1060 * Prepares a descriptor for cyclic DMA operation. This means that once the 1061 * descriptor is submitted, we will be submitting in a @period_len sized 1062 * buffers and calling callback once the period has been elapsed. Transfer 1063 * terminates only when client calls dmaengine_terminate_all() for this 1064 * channel. 1065 * 1066 * Returns a valid DMA descriptor or %NULL in case of failure. 1067 */ 1068 static struct dma_async_tx_descriptor * 1069 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 1070 size_t buf_len, size_t period_len, 1071 enum dma_transfer_direction dir) 1072 { 1073 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1074 struct ep93xx_dma_desc *desc, *first; 1075 size_t offset = 0; 1076 1077 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) { 1078 dev_warn(chan2dev(edmac), 1079 "channel was configured with different direction\n"); 1080 return NULL; 1081 } 1082 1083 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) { 1084 dev_warn(chan2dev(edmac), 1085 "channel is already used for cyclic transfers\n"); 1086 return NULL; 1087 } 1088 1089 if (period_len > DMA_MAX_CHAN_BYTES) { 1090 dev_warn(chan2dev(edmac), "too big period length %d\n", 1091 period_len); 1092 return NULL; 1093 } 1094 1095 /* Split the buffer into period size chunks */ 1096 first = NULL; 1097 for (offset = 0; offset < buf_len; offset += period_len) { 1098 desc = ep93xx_dma_desc_get(edmac); 1099 if (!desc) { 1100 dev_warn(chan2dev(edmac), "couln't get descriptor\n"); 1101 goto fail; 1102 } 1103 1104 if (dir == DMA_MEM_TO_DEV) { 1105 desc->src_addr = dma_addr + offset; 1106 desc->dst_addr = edmac->runtime_addr; 1107 } else { 1108 desc->src_addr = edmac->runtime_addr; 1109 desc->dst_addr = dma_addr + offset; 1110 } 1111 1112 desc->size = period_len; 1113 1114 if (!first) 1115 first = desc; 1116 else 1117 list_add_tail(&desc->node, &first->tx_list); 1118 } 1119 1120 first->txd.cookie = -EBUSY; 1121 1122 return &first->txd; 1123 1124 fail: 1125 ep93xx_dma_desc_put(edmac, first); 1126 return NULL; 1127 } 1128 1129 /** 1130 * ep93xx_dma_terminate_all - terminate all transactions 1131 * @edmac: channel 1132 * 1133 * Stops all DMA transactions. All descriptors are put back to the 1134 * @edmac->free_list and callbacks are _not_ called. 1135 */ 1136 static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) 1137 { 1138 struct ep93xx_dma_desc *desc, *_d; 1139 unsigned long flags; 1140 LIST_HEAD(list); 1141 1142 spin_lock_irqsave(&edmac->lock, flags); 1143 /* First we disable and flush the DMA channel */ 1144 edmac->edma->hw_shutdown(edmac); 1145 clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags); 1146 list_splice_init(&edmac->active, &list); 1147 list_splice_init(&edmac->queue, &list); 1148 /* 1149 * We then re-enable the channel. This way we can continue submitting 1150 * the descriptors by just calling ->hw_submit() again. 1151 */ 1152 edmac->edma->hw_setup(edmac); 1153 spin_unlock_irqrestore(&edmac->lock, flags); 1154 1155 list_for_each_entry_safe(desc, _d, &list, node) 1156 ep93xx_dma_desc_put(edmac, desc); 1157 1158 return 0; 1159 } 1160 1161 static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, 1162 struct dma_slave_config *config) 1163 { 1164 enum dma_slave_buswidth width; 1165 unsigned long flags; 1166 u32 addr, ctrl; 1167 1168 if (!edmac->edma->m2m) 1169 return -EINVAL; 1170 1171 switch (config->direction) { 1172 case DMA_DEV_TO_MEM: 1173 width = config->src_addr_width; 1174 addr = config->src_addr; 1175 break; 1176 1177 case DMA_MEM_TO_DEV: 1178 width = config->dst_addr_width; 1179 addr = config->dst_addr; 1180 break; 1181 1182 default: 1183 return -EINVAL; 1184 } 1185 1186 switch (width) { 1187 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1188 ctrl = 0; 1189 break; 1190 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1191 ctrl = M2M_CONTROL_PW_16; 1192 break; 1193 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1194 ctrl = M2M_CONTROL_PW_32; 1195 break; 1196 default: 1197 return -EINVAL; 1198 } 1199 1200 spin_lock_irqsave(&edmac->lock, flags); 1201 edmac->runtime_addr = addr; 1202 edmac->runtime_ctrl = ctrl; 1203 spin_unlock_irqrestore(&edmac->lock, flags); 1204 1205 return 0; 1206 } 1207 1208 /** 1209 * ep93xx_dma_control - manipulate all pending operations on a channel 1210 * @chan: channel 1211 * @cmd: control command to perform 1212 * @arg: optional argument 1213 * 1214 * Controls the channel. Function returns %0 in case of success or negative 1215 * error in case of failure. 1216 */ 1217 static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1218 unsigned long arg) 1219 { 1220 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1221 struct dma_slave_config *config; 1222 1223 switch (cmd) { 1224 case DMA_TERMINATE_ALL: 1225 return ep93xx_dma_terminate_all(edmac); 1226 1227 case DMA_SLAVE_CONFIG: 1228 config = (struct dma_slave_config *)arg; 1229 return ep93xx_dma_slave_config(edmac, config); 1230 1231 default: 1232 break; 1233 } 1234 1235 return -ENOSYS; 1236 } 1237 1238 /** 1239 * ep93xx_dma_tx_status - check if a transaction is completed 1240 * @chan: channel 1241 * @cookie: transaction specific cookie 1242 * @state: state of the transaction is stored here if given 1243 * 1244 * This function can be used to query state of a given transaction. 1245 */ 1246 static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, 1247 dma_cookie_t cookie, 1248 struct dma_tx_state *state) 1249 { 1250 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1251 dma_cookie_t last_used, last_completed; 1252 enum dma_status ret; 1253 unsigned long flags; 1254 1255 spin_lock_irqsave(&edmac->lock, flags); 1256 last_used = chan->cookie; 1257 last_completed = edmac->last_completed; 1258 spin_unlock_irqrestore(&edmac->lock, flags); 1259 1260 ret = dma_async_is_complete(cookie, last_completed, last_used); 1261 dma_set_tx_state(state, last_completed, last_used, 0); 1262 1263 return ret; 1264 } 1265 1266 /** 1267 * ep93xx_dma_issue_pending - push pending transactions to the hardware 1268 * @chan: channel 1269 * 1270 * When this function is called, all pending transactions are pushed to the 1271 * hardware and executed. 1272 */ 1273 static void ep93xx_dma_issue_pending(struct dma_chan *chan) 1274 { 1275 ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan)); 1276 } 1277 1278 static int __init ep93xx_dma_probe(struct platform_device *pdev) 1279 { 1280 struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev); 1281 struct ep93xx_dma_engine *edma; 1282 struct dma_device *dma_dev; 1283 size_t edma_size; 1284 int ret, i; 1285 1286 edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan); 1287 edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL); 1288 if (!edma) 1289 return -ENOMEM; 1290 1291 dma_dev = &edma->dma_dev; 1292 edma->m2m = platform_get_device_id(pdev)->driver_data; 1293 edma->num_channels = pdata->num_channels; 1294 1295 INIT_LIST_HEAD(&dma_dev->channels); 1296 for (i = 0; i < pdata->num_channels; i++) { 1297 const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i]; 1298 struct ep93xx_dma_chan *edmac = &edma->channels[i]; 1299 1300 edmac->chan.device = dma_dev; 1301 edmac->regs = cdata->base; 1302 edmac->irq = cdata->irq; 1303 edmac->edma = edma; 1304 1305 edmac->clk = clk_get(NULL, cdata->name); 1306 if (IS_ERR(edmac->clk)) { 1307 dev_warn(&pdev->dev, "failed to get clock for %s\n", 1308 cdata->name); 1309 continue; 1310 } 1311 1312 spin_lock_init(&edmac->lock); 1313 INIT_LIST_HEAD(&edmac->active); 1314 INIT_LIST_HEAD(&edmac->queue); 1315 INIT_LIST_HEAD(&edmac->free_list); 1316 tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet, 1317 (unsigned long)edmac); 1318 1319 list_add_tail(&edmac->chan.device_node, 1320 &dma_dev->channels); 1321 } 1322 1323 dma_cap_zero(dma_dev->cap_mask); 1324 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 1325 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); 1326 1327 dma_dev->dev = &pdev->dev; 1328 dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources; 1329 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; 1330 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; 1331 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; 1332 dma_dev->device_control = ep93xx_dma_control; 1333 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; 1334 dma_dev->device_tx_status = ep93xx_dma_tx_status; 1335 1336 dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES); 1337 1338 if (edma->m2m) { 1339 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 1340 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy; 1341 1342 edma->hw_setup = m2m_hw_setup; 1343 edma->hw_shutdown = m2m_hw_shutdown; 1344 edma->hw_submit = m2m_hw_submit; 1345 edma->hw_interrupt = m2m_hw_interrupt; 1346 } else { 1347 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); 1348 1349 edma->hw_setup = m2p_hw_setup; 1350 edma->hw_shutdown = m2p_hw_shutdown; 1351 edma->hw_submit = m2p_hw_submit; 1352 edma->hw_interrupt = m2p_hw_interrupt; 1353 } 1354 1355 ret = dma_async_device_register(dma_dev); 1356 if (unlikely(ret)) { 1357 for (i = 0; i < edma->num_channels; i++) { 1358 struct ep93xx_dma_chan *edmac = &edma->channels[i]; 1359 if (!IS_ERR_OR_NULL(edmac->clk)) 1360 clk_put(edmac->clk); 1361 } 1362 kfree(edma); 1363 } else { 1364 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", 1365 edma->m2m ? "M" : "P"); 1366 } 1367 1368 return ret; 1369 } 1370 1371 static struct platform_device_id ep93xx_dma_driver_ids[] = { 1372 { "ep93xx-dma-m2p", 0 }, 1373 { "ep93xx-dma-m2m", 1 }, 1374 { }, 1375 }; 1376 1377 static struct platform_driver ep93xx_dma_driver = { 1378 .driver = { 1379 .name = "ep93xx-dma", 1380 }, 1381 .id_table = ep93xx_dma_driver_ids, 1382 }; 1383 1384 static int __init ep93xx_dma_module_init(void) 1385 { 1386 return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe); 1387 } 1388 subsys_initcall(ep93xx_dma_module_init); 1389 1390 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>"); 1391 MODULE_DESCRIPTION("EP93xx DMA driver"); 1392 MODULE_LICENSE("GPL"); 1393