1 /* 2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 3 * 4 * Copyright (C) 2008 Atmel Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * 12 * This supports the Atmel AHB DMA Controller, 13 * 14 * The driver has currently been tested with the Atmel AT91SAM9RL 15 * and AT91SAM9G45 series. 16 */ 17 18 #include <linux/clk.h> 19 #include <linux/dmaengine.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/dmapool.h> 22 #include <linux/interrupt.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/slab.h> 26 #include <linux/of.h> 27 #include <linux/of_device.h> 28 29 #include "at_hdmac_regs.h" 30 #include "dmaengine.h" 31 32 /* 33 * Glossary 34 * -------- 35 * 36 * at_hdmac : Name of the ATmel AHB DMA Controller 37 * at_dma_ / atdma : ATmel DMA controller entity related 38 * atc_ / atchan : ATmel DMA Channel entity related 39 */ 40 41 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42 #define ATC_DEFAULT_CTRLA (0) 43 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 44 |ATC_DIF(AT_DMA_MEM_IF)) 45 46 /* 47 * Initial number of descriptors to allocate for each channel. This could 48 * be increased during dma usage. 49 */ 50 static unsigned int init_nr_desc_per_channel = 64; 51 module_param(init_nr_desc_per_channel, uint, 0644); 52 MODULE_PARM_DESC(init_nr_desc_per_channel, 53 "initial descriptors per channel (default: 64)"); 54 55 56 /* prototypes */ 57 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); 58 59 60 /*----------------------------------------------------------------------*/ 61 62 static struct at_desc *atc_first_active(struct at_dma_chan *atchan) 63 { 64 return list_first_entry(&atchan->active_list, 65 struct at_desc, desc_node); 66 } 67 68 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) 69 { 70 return list_first_entry(&atchan->queue, 71 struct at_desc, desc_node); 72 } 73 74 /** 75 * atc_alloc_descriptor - allocate and return an initialized descriptor 76 * @chan: the channel to allocate descriptors for 77 * @gfp_flags: GFP allocation flags 78 * 79 * Note: The ack-bit is positioned in the descriptor flag at creation time 80 * to make initial allocation more convenient. This bit will be cleared 81 * and control will be given to client at usage time (during 82 * preparation functions). 83 */ 84 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, 85 gfp_t gfp_flags) 86 { 87 struct at_desc *desc = NULL; 88 struct at_dma *atdma = to_at_dma(chan->device); 89 dma_addr_t phys; 90 91 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); 92 if (desc) { 93 memset(desc, 0, sizeof(struct at_desc)); 94 INIT_LIST_HEAD(&desc->tx_list); 95 dma_async_tx_descriptor_init(&desc->txd, chan); 96 /* txd.flags will be overwritten in prep functions */ 97 desc->txd.flags = DMA_CTRL_ACK; 98 desc->txd.tx_submit = atc_tx_submit; 99 desc->txd.phys = phys; 100 } 101 102 return desc; 103 } 104 105 /** 106 * atc_desc_get - get an unused descriptor from free_list 107 * @atchan: channel we want a new descriptor for 108 */ 109 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) 110 { 111 struct at_desc *desc, *_desc; 112 struct at_desc *ret = NULL; 113 unsigned long flags; 114 unsigned int i = 0; 115 LIST_HEAD(tmp_list); 116 117 spin_lock_irqsave(&atchan->lock, flags); 118 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 119 i++; 120 if (async_tx_test_ack(&desc->txd)) { 121 list_del(&desc->desc_node); 122 ret = desc; 123 break; 124 } 125 dev_dbg(chan2dev(&atchan->chan_common), 126 "desc %p not ACKed\n", desc); 127 } 128 spin_unlock_irqrestore(&atchan->lock, flags); 129 dev_vdbg(chan2dev(&atchan->chan_common), 130 "scanned %u descriptors on freelist\n", i); 131 132 /* no more descriptor available in initial pool: create one more */ 133 if (!ret) { 134 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 135 if (ret) { 136 spin_lock_irqsave(&atchan->lock, flags); 137 atchan->descs_allocated++; 138 spin_unlock_irqrestore(&atchan->lock, flags); 139 } else { 140 dev_err(chan2dev(&atchan->chan_common), 141 "not enough descriptors available\n"); 142 } 143 } 144 145 return ret; 146 } 147 148 /** 149 * atc_desc_put - move a descriptor, including any children, to the free list 150 * @atchan: channel we work on 151 * @desc: descriptor, at the head of a chain, to move to free list 152 */ 153 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) 154 { 155 if (desc) { 156 struct at_desc *child; 157 unsigned long flags; 158 159 spin_lock_irqsave(&atchan->lock, flags); 160 list_for_each_entry(child, &desc->tx_list, desc_node) 161 dev_vdbg(chan2dev(&atchan->chan_common), 162 "moving child desc %p to freelist\n", 163 child); 164 list_splice_init(&desc->tx_list, &atchan->free_list); 165 dev_vdbg(chan2dev(&atchan->chan_common), 166 "moving desc %p to freelist\n", desc); 167 list_add(&desc->desc_node, &atchan->free_list); 168 spin_unlock_irqrestore(&atchan->lock, flags); 169 } 170 } 171 172 /** 173 * atc_desc_chain - build chain adding a descripor 174 * @first: address of first descripor of the chain 175 * @prev: address of previous descripor of the chain 176 * @desc: descriptor to queue 177 * 178 * Called from prep_* functions 179 */ 180 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, 181 struct at_desc *desc) 182 { 183 if (!(*first)) { 184 *first = desc; 185 } else { 186 /* inform the HW lli about chaining */ 187 (*prev)->lli.dscr = desc->txd.phys; 188 /* insert the link descriptor to the LD ring */ 189 list_add_tail(&desc->desc_node, 190 &(*first)->tx_list); 191 } 192 *prev = desc; 193 } 194 195 /** 196 * atc_dostart - starts the DMA engine for real 197 * @atchan: the channel we want to start 198 * @first: first descriptor in the list we want to begin with 199 * 200 * Called with atchan->lock held and bh disabled 201 */ 202 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) 203 { 204 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 205 206 /* ASSERT: channel is idle */ 207 if (atc_chan_is_enabled(atchan)) { 208 dev_err(chan2dev(&atchan->chan_common), 209 "BUG: Attempted to start non-idle channel\n"); 210 dev_err(chan2dev(&atchan->chan_common), 211 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 212 channel_readl(atchan, SADDR), 213 channel_readl(atchan, DADDR), 214 channel_readl(atchan, CTRLA), 215 channel_readl(atchan, CTRLB), 216 channel_readl(atchan, DSCR)); 217 218 /* The tasklet will hopefully advance the queue... */ 219 return; 220 } 221 222 vdbg_dump_regs(atchan); 223 224 /* clear any pending interrupt */ 225 while (dma_readl(atdma, EBCISR)) 226 cpu_relax(); 227 228 channel_writel(atchan, SADDR, 0); 229 channel_writel(atchan, DADDR, 0); 230 channel_writel(atchan, CTRLA, 0); 231 channel_writel(atchan, CTRLB, 0); 232 channel_writel(atchan, DSCR, first->txd.phys); 233 dma_writel(atdma, CHER, atchan->mask); 234 235 vdbg_dump_regs(atchan); 236 } 237 238 /** 239 * atc_chain_complete - finish work for one transaction chain 240 * @atchan: channel we work on 241 * @desc: descriptor at the head of the chain we want do complete 242 * 243 * Called with atchan->lock held and bh disabled */ 244 static void 245 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) 246 { 247 struct dma_async_tx_descriptor *txd = &desc->txd; 248 249 dev_vdbg(chan2dev(&atchan->chan_common), 250 "descriptor %u complete\n", txd->cookie); 251 252 dma_cookie_complete(txd); 253 254 /* move children to free_list */ 255 list_splice_init(&desc->tx_list, &atchan->free_list); 256 /* move myself to free_list */ 257 list_move(&desc->desc_node, &atchan->free_list); 258 259 /* unmap dma addresses (not on slave channels) */ 260 if (!atchan->chan_common.private) { 261 struct device *parent = chan2parent(&atchan->chan_common); 262 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 263 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 264 dma_unmap_single(parent, 265 desc->lli.daddr, 266 desc->len, DMA_FROM_DEVICE); 267 else 268 dma_unmap_page(parent, 269 desc->lli.daddr, 270 desc->len, DMA_FROM_DEVICE); 271 } 272 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 273 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 274 dma_unmap_single(parent, 275 desc->lli.saddr, 276 desc->len, DMA_TO_DEVICE); 277 else 278 dma_unmap_page(parent, 279 desc->lli.saddr, 280 desc->len, DMA_TO_DEVICE); 281 } 282 } 283 284 /* for cyclic transfers, 285 * no need to replay callback function while stopping */ 286 if (!atc_chan_is_cyclic(atchan)) { 287 dma_async_tx_callback callback = txd->callback; 288 void *param = txd->callback_param; 289 290 /* 291 * The API requires that no submissions are done from a 292 * callback, so we don't need to drop the lock here 293 */ 294 if (callback) 295 callback(param); 296 } 297 298 dma_run_dependencies(txd); 299 } 300 301 /** 302 * atc_complete_all - finish work for all transactions 303 * @atchan: channel to complete transactions for 304 * 305 * Eventually submit queued descriptors if any 306 * 307 * Assume channel is idle while calling this function 308 * Called with atchan->lock held and bh disabled 309 */ 310 static void atc_complete_all(struct at_dma_chan *atchan) 311 { 312 struct at_desc *desc, *_desc; 313 LIST_HEAD(list); 314 315 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); 316 317 BUG_ON(atc_chan_is_enabled(atchan)); 318 319 /* 320 * Submit queued descriptors ASAP, i.e. before we go through 321 * the completed ones. 322 */ 323 if (!list_empty(&atchan->queue)) 324 atc_dostart(atchan, atc_first_queued(atchan)); 325 /* empty active_list now it is completed */ 326 list_splice_init(&atchan->active_list, &list); 327 /* empty queue list by moving descriptors (if any) to active_list */ 328 list_splice_init(&atchan->queue, &atchan->active_list); 329 330 list_for_each_entry_safe(desc, _desc, &list, desc_node) 331 atc_chain_complete(atchan, desc); 332 } 333 334 /** 335 * atc_cleanup_descriptors - cleanup up finished descriptors in active_list 336 * @atchan: channel to be cleaned up 337 * 338 * Called with atchan->lock held and bh disabled 339 */ 340 static void atc_cleanup_descriptors(struct at_dma_chan *atchan) 341 { 342 struct at_desc *desc, *_desc; 343 struct at_desc *child; 344 345 dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); 346 347 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 348 if (!(desc->lli.ctrla & ATC_DONE)) 349 /* This one is currently in progress */ 350 return; 351 352 list_for_each_entry(child, &desc->tx_list, desc_node) 353 if (!(child->lli.ctrla & ATC_DONE)) 354 /* Currently in progress */ 355 return; 356 357 /* 358 * No descriptors so far seem to be in progress, i.e. 359 * this chain must be done. 360 */ 361 atc_chain_complete(atchan, desc); 362 } 363 } 364 365 /** 366 * atc_advance_work - at the end of a transaction, move forward 367 * @atchan: channel where the transaction ended 368 * 369 * Called with atchan->lock held and bh disabled 370 */ 371 static void atc_advance_work(struct at_dma_chan *atchan) 372 { 373 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); 374 375 if (list_empty(&atchan->active_list) || 376 list_is_singular(&atchan->active_list)) { 377 atc_complete_all(atchan); 378 } else { 379 atc_chain_complete(atchan, atc_first_active(atchan)); 380 /* advance work */ 381 atc_dostart(atchan, atc_first_active(atchan)); 382 } 383 } 384 385 386 /** 387 * atc_handle_error - handle errors reported by DMA controller 388 * @atchan: channel where error occurs 389 * 390 * Called with atchan->lock held and bh disabled 391 */ 392 static void atc_handle_error(struct at_dma_chan *atchan) 393 { 394 struct at_desc *bad_desc; 395 struct at_desc *child; 396 397 /* 398 * The descriptor currently at the head of the active list is 399 * broked. Since we don't have any way to report errors, we'll 400 * just have to scream loudly and try to carry on. 401 */ 402 bad_desc = atc_first_active(atchan); 403 list_del_init(&bad_desc->desc_node); 404 405 /* As we are stopped, take advantage to push queued descriptors 406 * in active_list */ 407 list_splice_init(&atchan->queue, atchan->active_list.prev); 408 409 /* Try to restart the controller */ 410 if (!list_empty(&atchan->active_list)) 411 atc_dostart(atchan, atc_first_active(atchan)); 412 413 /* 414 * KERN_CRITICAL may seem harsh, but since this only happens 415 * when someone submits a bad physical address in a 416 * descriptor, we should consider ourselves lucky that the 417 * controller flagged an error instead of scribbling over 418 * random memory locations. 419 */ 420 dev_crit(chan2dev(&atchan->chan_common), 421 "Bad descriptor submitted for DMA!\n"); 422 dev_crit(chan2dev(&atchan->chan_common), 423 " cookie: %d\n", bad_desc->txd.cookie); 424 atc_dump_lli(atchan, &bad_desc->lli); 425 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 426 atc_dump_lli(atchan, &child->lli); 427 428 /* Pretend the descriptor completed successfully */ 429 atc_chain_complete(atchan, bad_desc); 430 } 431 432 /** 433 * atc_handle_cyclic - at the end of a period, run callback function 434 * @atchan: channel used for cyclic operations 435 * 436 * Called with atchan->lock held and bh disabled 437 */ 438 static void atc_handle_cyclic(struct at_dma_chan *atchan) 439 { 440 struct at_desc *first = atc_first_active(atchan); 441 struct dma_async_tx_descriptor *txd = &first->txd; 442 dma_async_tx_callback callback = txd->callback; 443 void *param = txd->callback_param; 444 445 dev_vdbg(chan2dev(&atchan->chan_common), 446 "new cyclic period llp 0x%08x\n", 447 channel_readl(atchan, DSCR)); 448 449 if (callback) 450 callback(param); 451 } 452 453 /*-- IRQ & Tasklet ---------------------------------------------------*/ 454 455 static void atc_tasklet(unsigned long data) 456 { 457 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 458 unsigned long flags; 459 460 spin_lock_irqsave(&atchan->lock, flags); 461 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 462 atc_handle_error(atchan); 463 else if (atc_chan_is_cyclic(atchan)) 464 atc_handle_cyclic(atchan); 465 else 466 atc_advance_work(atchan); 467 468 spin_unlock_irqrestore(&atchan->lock, flags); 469 } 470 471 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 472 { 473 struct at_dma *atdma = (struct at_dma *)dev_id; 474 struct at_dma_chan *atchan; 475 int i; 476 u32 status, pending, imr; 477 int ret = IRQ_NONE; 478 479 do { 480 imr = dma_readl(atdma, EBCIMR); 481 status = dma_readl(atdma, EBCISR); 482 pending = status & imr; 483 484 if (!pending) 485 break; 486 487 dev_vdbg(atdma->dma_common.dev, 488 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", 489 status, imr, pending); 490 491 for (i = 0; i < atdma->dma_common.chancnt; i++) { 492 atchan = &atdma->chan[i]; 493 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { 494 if (pending & AT_DMA_ERR(i)) { 495 /* Disable channel on AHB error */ 496 dma_writel(atdma, CHDR, 497 AT_DMA_RES(i) | atchan->mask); 498 /* Give information to tasklet */ 499 set_bit(ATC_IS_ERROR, &atchan->status); 500 } 501 tasklet_schedule(&atchan->tasklet); 502 ret = IRQ_HANDLED; 503 } 504 } 505 506 } while (pending); 507 508 return ret; 509 } 510 511 512 /*-- DMA Engine API --------------------------------------------------*/ 513 514 /** 515 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine 516 * @desc: descriptor at the head of the transaction chain 517 * 518 * Queue chain if DMA engine is working already 519 * 520 * Cookie increment and adding to active_list or queue must be atomic 521 */ 522 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) 523 { 524 struct at_desc *desc = txd_to_at_desc(tx); 525 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 526 dma_cookie_t cookie; 527 unsigned long flags; 528 529 spin_lock_irqsave(&atchan->lock, flags); 530 cookie = dma_cookie_assign(tx); 531 532 if (list_empty(&atchan->active_list)) { 533 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 534 desc->txd.cookie); 535 atc_dostart(atchan, desc); 536 list_add_tail(&desc->desc_node, &atchan->active_list); 537 } else { 538 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 539 desc->txd.cookie); 540 list_add_tail(&desc->desc_node, &atchan->queue); 541 } 542 543 spin_unlock_irqrestore(&atchan->lock, flags); 544 545 return cookie; 546 } 547 548 /** 549 * atc_prep_dma_memcpy - prepare a memcpy operation 550 * @chan: the channel to prepare operation on 551 * @dest: operation virtual destination address 552 * @src: operation virtual source address 553 * @len: operation length 554 * @flags: tx descriptor status flags 555 */ 556 static struct dma_async_tx_descriptor * 557 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 558 size_t len, unsigned long flags) 559 { 560 struct at_dma_chan *atchan = to_at_dma_chan(chan); 561 struct at_desc *desc = NULL; 562 struct at_desc *first = NULL; 563 struct at_desc *prev = NULL; 564 size_t xfer_count; 565 size_t offset; 566 unsigned int src_width; 567 unsigned int dst_width; 568 u32 ctrla; 569 u32 ctrlb; 570 571 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 572 dest, src, len, flags); 573 574 if (unlikely(!len)) { 575 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 576 return NULL; 577 } 578 579 ctrla = ATC_DEFAULT_CTRLA; 580 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 581 | ATC_SRC_ADDR_MODE_INCR 582 | ATC_DST_ADDR_MODE_INCR 583 | ATC_FC_MEM2MEM; 584 585 /* 586 * We can be a lot more clever here, but this should take care 587 * of the most common optimization. 588 */ 589 if (!((src | dest | len) & 3)) { 590 ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; 591 src_width = dst_width = 2; 592 } else if (!((src | dest | len) & 1)) { 593 ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; 594 src_width = dst_width = 1; 595 } else { 596 ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; 597 src_width = dst_width = 0; 598 } 599 600 for (offset = 0; offset < len; offset += xfer_count << src_width) { 601 xfer_count = min_t(size_t, (len - offset) >> src_width, 602 ATC_BTSIZE_MAX); 603 604 desc = atc_desc_get(atchan); 605 if (!desc) 606 goto err_desc_get; 607 608 desc->lli.saddr = src + offset; 609 desc->lli.daddr = dest + offset; 610 desc->lli.ctrla = ctrla | xfer_count; 611 desc->lli.ctrlb = ctrlb; 612 613 desc->txd.cookie = 0; 614 615 atc_desc_chain(&first, &prev, desc); 616 } 617 618 /* First descriptor of the chain embedds additional information */ 619 first->txd.cookie = -EBUSY; 620 first->len = len; 621 622 /* set end-of-link to the last link descriptor of list*/ 623 set_desc_eol(desc); 624 625 first->txd.flags = flags; /* client is in control of this ack */ 626 627 return &first->txd; 628 629 err_desc_get: 630 atc_desc_put(atchan, first); 631 return NULL; 632 } 633 634 635 /** 636 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 637 * @chan: DMA channel 638 * @sgl: scatterlist to transfer to/from 639 * @sg_len: number of entries in @scatterlist 640 * @direction: DMA direction 641 * @flags: tx descriptor status flags 642 * @context: transaction context (ignored) 643 */ 644 static struct dma_async_tx_descriptor * 645 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 646 unsigned int sg_len, enum dma_transfer_direction direction, 647 unsigned long flags, void *context) 648 { 649 struct at_dma_chan *atchan = to_at_dma_chan(chan); 650 struct at_dma_slave *atslave = chan->private; 651 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 652 struct at_desc *first = NULL; 653 struct at_desc *prev = NULL; 654 u32 ctrla; 655 u32 ctrlb; 656 dma_addr_t reg; 657 unsigned int reg_width; 658 unsigned int mem_width; 659 unsigned int i; 660 struct scatterlist *sg; 661 size_t total_len = 0; 662 663 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 664 sg_len, 665 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 666 flags); 667 668 if (unlikely(!atslave || !sg_len)) { 669 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 670 return NULL; 671 } 672 673 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; 674 ctrlb = ATC_IEN; 675 676 switch (direction) { 677 case DMA_MEM_TO_DEV: 678 reg_width = convert_buswidth(sconfig->dst_addr_width); 679 ctrla |= ATC_DST_WIDTH(reg_width); 680 ctrlb |= ATC_DST_ADDR_MODE_FIXED 681 | ATC_SRC_ADDR_MODE_INCR 682 | ATC_FC_MEM2PER 683 | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); 684 reg = sconfig->dst_addr; 685 for_each_sg(sgl, sg, sg_len, i) { 686 struct at_desc *desc; 687 u32 len; 688 u32 mem; 689 690 desc = atc_desc_get(atchan); 691 if (!desc) 692 goto err_desc_get; 693 694 mem = sg_dma_address(sg); 695 len = sg_dma_len(sg); 696 mem_width = 2; 697 if (unlikely(mem & 3 || len & 3)) 698 mem_width = 0; 699 700 desc->lli.saddr = mem; 701 desc->lli.daddr = reg; 702 desc->lli.ctrla = ctrla 703 | ATC_SRC_WIDTH(mem_width) 704 | len >> mem_width; 705 desc->lli.ctrlb = ctrlb; 706 707 atc_desc_chain(&first, &prev, desc); 708 total_len += len; 709 } 710 break; 711 case DMA_DEV_TO_MEM: 712 reg_width = convert_buswidth(sconfig->src_addr_width); 713 ctrla |= ATC_SRC_WIDTH(reg_width); 714 ctrlb |= ATC_DST_ADDR_MODE_INCR 715 | ATC_SRC_ADDR_MODE_FIXED 716 | ATC_FC_PER2MEM 717 | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); 718 719 reg = sconfig->src_addr; 720 for_each_sg(sgl, sg, sg_len, i) { 721 struct at_desc *desc; 722 u32 len; 723 u32 mem; 724 725 desc = atc_desc_get(atchan); 726 if (!desc) 727 goto err_desc_get; 728 729 mem = sg_dma_address(sg); 730 len = sg_dma_len(sg); 731 mem_width = 2; 732 if (unlikely(mem & 3 || len & 3)) 733 mem_width = 0; 734 735 desc->lli.saddr = reg; 736 desc->lli.daddr = mem; 737 desc->lli.ctrla = ctrla 738 | ATC_DST_WIDTH(mem_width) 739 | len >> reg_width; 740 desc->lli.ctrlb = ctrlb; 741 742 atc_desc_chain(&first, &prev, desc); 743 total_len += len; 744 } 745 break; 746 default: 747 return NULL; 748 } 749 750 /* set end-of-link to the last link descriptor of list*/ 751 set_desc_eol(prev); 752 753 /* First descriptor of the chain embedds additional information */ 754 first->txd.cookie = -EBUSY; 755 first->len = total_len; 756 757 /* first link descriptor of list is responsible of flags */ 758 first->txd.flags = flags; /* client is in control of this ack */ 759 760 return &first->txd; 761 762 err_desc_get: 763 dev_err(chan2dev(chan), "not enough descriptors available\n"); 764 atc_desc_put(atchan, first); 765 return NULL; 766 } 767 768 /** 769 * atc_dma_cyclic_check_values 770 * Check for too big/unaligned periods and unaligned DMA buffer 771 */ 772 static int 773 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 774 size_t period_len, enum dma_transfer_direction direction) 775 { 776 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 777 goto err_out; 778 if (unlikely(period_len & ((1 << reg_width) - 1))) 779 goto err_out; 780 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 781 goto err_out; 782 if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) 783 goto err_out; 784 785 return 0; 786 787 err_out: 788 return -EINVAL; 789 } 790 791 /** 792 * atc_dma_cyclic_fill_desc - Fill one period decriptor 793 */ 794 static int 795 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, 796 unsigned int period_index, dma_addr_t buf_addr, 797 unsigned int reg_width, size_t period_len, 798 enum dma_transfer_direction direction) 799 { 800 struct at_dma_chan *atchan = to_at_dma_chan(chan); 801 struct at_dma_slave *atslave = chan->private; 802 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 803 u32 ctrla; 804 805 /* prepare common CRTLA value */ 806 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla 807 | ATC_DST_WIDTH(reg_width) 808 | ATC_SRC_WIDTH(reg_width) 809 | period_len >> reg_width; 810 811 switch (direction) { 812 case DMA_MEM_TO_DEV: 813 desc->lli.saddr = buf_addr + (period_len * period_index); 814 desc->lli.daddr = sconfig->dst_addr; 815 desc->lli.ctrla = ctrla; 816 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 817 | ATC_SRC_ADDR_MODE_INCR 818 | ATC_FC_MEM2PER 819 | ATC_SIF(AT_DMA_MEM_IF) 820 | ATC_DIF(AT_DMA_PER_IF); 821 break; 822 823 case DMA_DEV_TO_MEM: 824 desc->lli.saddr = sconfig->src_addr; 825 desc->lli.daddr = buf_addr + (period_len * period_index); 826 desc->lli.ctrla = ctrla; 827 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 828 | ATC_SRC_ADDR_MODE_FIXED 829 | ATC_FC_PER2MEM 830 | ATC_SIF(AT_DMA_PER_IF) 831 | ATC_DIF(AT_DMA_MEM_IF); 832 break; 833 834 default: 835 return -EINVAL; 836 } 837 838 return 0; 839 } 840 841 /** 842 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer 843 * @chan: the DMA channel to prepare 844 * @buf_addr: physical DMA address where the buffer starts 845 * @buf_len: total number of bytes for the entire buffer 846 * @period_len: number of bytes for each period 847 * @direction: transfer direction, to or from device 848 * @context: transfer context (ignored) 849 */ 850 static struct dma_async_tx_descriptor * 851 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 852 size_t period_len, enum dma_transfer_direction direction, 853 void *context) 854 { 855 struct at_dma_chan *atchan = to_at_dma_chan(chan); 856 struct at_dma_slave *atslave = chan->private; 857 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 858 struct at_desc *first = NULL; 859 struct at_desc *prev = NULL; 860 unsigned long was_cyclic; 861 unsigned int reg_width; 862 unsigned int periods = buf_len / period_len; 863 unsigned int i; 864 865 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 866 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 867 buf_addr, 868 periods, buf_len, period_len); 869 870 if (unlikely(!atslave || !buf_len || !period_len)) { 871 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 872 return NULL; 873 } 874 875 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 876 if (was_cyclic) { 877 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 878 return NULL; 879 } 880 881 if (sconfig->direction == DMA_MEM_TO_DEV) 882 reg_width = convert_buswidth(sconfig->dst_addr_width); 883 else 884 reg_width = convert_buswidth(sconfig->src_addr_width); 885 886 /* Check for too big/unaligned periods and unaligned DMA buffer */ 887 if (atc_dma_cyclic_check_values(reg_width, buf_addr, 888 period_len, direction)) 889 goto err_out; 890 891 /* build cyclic linked list */ 892 for (i = 0; i < periods; i++) { 893 struct at_desc *desc; 894 895 desc = atc_desc_get(atchan); 896 if (!desc) 897 goto err_desc_get; 898 899 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 900 reg_width, period_len, direction)) 901 goto err_desc_get; 902 903 atc_desc_chain(&first, &prev, desc); 904 } 905 906 /* lets make a cyclic list */ 907 prev->lli.dscr = first->txd.phys; 908 909 /* First descriptor of the chain embedds additional information */ 910 first->txd.cookie = -EBUSY; 911 first->len = buf_len; 912 913 return &first->txd; 914 915 err_desc_get: 916 dev_err(chan2dev(chan), "not enough descriptors available\n"); 917 atc_desc_put(atchan, first); 918 err_out: 919 clear_bit(ATC_IS_CYCLIC, &atchan->status); 920 return NULL; 921 } 922 923 static int set_runtime_config(struct dma_chan *chan, 924 struct dma_slave_config *sconfig) 925 { 926 struct at_dma_chan *atchan = to_at_dma_chan(chan); 927 928 /* Check if it is chan is configured for slave transfers */ 929 if (!chan->private) 930 return -EINVAL; 931 932 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 933 934 convert_burst(&atchan->dma_sconfig.src_maxburst); 935 convert_burst(&atchan->dma_sconfig.dst_maxburst); 936 937 return 0; 938 } 939 940 941 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 942 unsigned long arg) 943 { 944 struct at_dma_chan *atchan = to_at_dma_chan(chan); 945 struct at_dma *atdma = to_at_dma(chan->device); 946 int chan_id = atchan->chan_common.chan_id; 947 unsigned long flags; 948 949 LIST_HEAD(list); 950 951 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 952 953 if (cmd == DMA_PAUSE) { 954 spin_lock_irqsave(&atchan->lock, flags); 955 956 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 957 set_bit(ATC_IS_PAUSED, &atchan->status); 958 959 spin_unlock_irqrestore(&atchan->lock, flags); 960 } else if (cmd == DMA_RESUME) { 961 if (!atc_chan_is_paused(atchan)) 962 return 0; 963 964 spin_lock_irqsave(&atchan->lock, flags); 965 966 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 967 clear_bit(ATC_IS_PAUSED, &atchan->status); 968 969 spin_unlock_irqrestore(&atchan->lock, flags); 970 } else if (cmd == DMA_TERMINATE_ALL) { 971 struct at_desc *desc, *_desc; 972 /* 973 * This is only called when something went wrong elsewhere, so 974 * we don't really care about the data. Just disable the 975 * channel. We still have to poll the channel enable bit due 976 * to AHB/HSB limitations. 977 */ 978 spin_lock_irqsave(&atchan->lock, flags); 979 980 /* disabling channel: must also remove suspend state */ 981 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 982 983 /* confirm that this channel is disabled */ 984 while (dma_readl(atdma, CHSR) & atchan->mask) 985 cpu_relax(); 986 987 /* active_list entries will end up before queued entries */ 988 list_splice_init(&atchan->queue, &list); 989 list_splice_init(&atchan->active_list, &list); 990 991 /* Flush all pending and queued descriptors */ 992 list_for_each_entry_safe(desc, _desc, &list, desc_node) 993 atc_chain_complete(atchan, desc); 994 995 clear_bit(ATC_IS_PAUSED, &atchan->status); 996 /* if channel dedicated to cyclic operations, free it */ 997 clear_bit(ATC_IS_CYCLIC, &atchan->status); 998 999 spin_unlock_irqrestore(&atchan->lock, flags); 1000 } else if (cmd == DMA_SLAVE_CONFIG) { 1001 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1002 } else { 1003 return -ENXIO; 1004 } 1005 1006 return 0; 1007 } 1008 1009 /** 1010 * atc_tx_status - poll for transaction completion 1011 * @chan: DMA channel 1012 * @cookie: transaction identifier to check status of 1013 * @txstate: if not %NULL updated with transaction state 1014 * 1015 * If @txstate is passed in, upon return it reflect the driver 1016 * internal state and can be used with dma_async_is_complete() to check 1017 * the status of multiple cookies without re-checking hardware state. 1018 */ 1019 static enum dma_status 1020 atc_tx_status(struct dma_chan *chan, 1021 dma_cookie_t cookie, 1022 struct dma_tx_state *txstate) 1023 { 1024 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1025 dma_cookie_t last_used; 1026 dma_cookie_t last_complete; 1027 unsigned long flags; 1028 enum dma_status ret; 1029 1030 spin_lock_irqsave(&atchan->lock, flags); 1031 1032 ret = dma_cookie_status(chan, cookie, txstate); 1033 if (ret != DMA_SUCCESS) { 1034 atc_cleanup_descriptors(atchan); 1035 1036 ret = dma_cookie_status(chan, cookie, txstate); 1037 } 1038 1039 last_complete = chan->completed_cookie; 1040 last_used = chan->cookie; 1041 1042 spin_unlock_irqrestore(&atchan->lock, flags); 1043 1044 if (ret != DMA_SUCCESS) 1045 dma_set_residue(txstate, atc_first_active(atchan)->len); 1046 1047 if (atc_chan_is_paused(atchan)) 1048 ret = DMA_PAUSED; 1049 1050 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", 1051 ret, cookie, last_complete ? last_complete : 0, 1052 last_used ? last_used : 0); 1053 1054 return ret; 1055 } 1056 1057 /** 1058 * atc_issue_pending - try to finish work 1059 * @chan: target DMA channel 1060 */ 1061 static void atc_issue_pending(struct dma_chan *chan) 1062 { 1063 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1064 unsigned long flags; 1065 1066 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1067 1068 /* Not needed for cyclic transfers */ 1069 if (atc_chan_is_cyclic(atchan)) 1070 return; 1071 1072 spin_lock_irqsave(&atchan->lock, flags); 1073 if (!atc_chan_is_enabled(atchan)) { 1074 atc_advance_work(atchan); 1075 } 1076 spin_unlock_irqrestore(&atchan->lock, flags); 1077 } 1078 1079 /** 1080 * atc_alloc_chan_resources - allocate resources for DMA channel 1081 * @chan: allocate descriptor resources for this channel 1082 * @client: current client requesting the channel be ready for requests 1083 * 1084 * return - the number of allocated descriptors 1085 */ 1086 static int atc_alloc_chan_resources(struct dma_chan *chan) 1087 { 1088 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1089 struct at_dma *atdma = to_at_dma(chan->device); 1090 struct at_desc *desc; 1091 struct at_dma_slave *atslave; 1092 unsigned long flags; 1093 int i; 1094 u32 cfg; 1095 LIST_HEAD(tmp_list); 1096 1097 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1098 1099 /* ASSERT: channel is idle */ 1100 if (atc_chan_is_enabled(atchan)) { 1101 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 1102 return -EIO; 1103 } 1104 1105 cfg = ATC_DEFAULT_CFG; 1106 1107 atslave = chan->private; 1108 if (atslave) { 1109 /* 1110 * We need controller-specific data to set up slave 1111 * transfers. 1112 */ 1113 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 1114 1115 /* if cfg configuration specified take it instad of default */ 1116 if (atslave->cfg) 1117 cfg = atslave->cfg; 1118 } 1119 1120 /* have we already been set up? 1121 * reconfigure channel but no need to reallocate descriptors */ 1122 if (!list_empty(&atchan->free_list)) 1123 return atchan->descs_allocated; 1124 1125 /* Allocate initial pool of descriptors */ 1126 for (i = 0; i < init_nr_desc_per_channel; i++) { 1127 desc = atc_alloc_descriptor(chan, GFP_KERNEL); 1128 if (!desc) { 1129 dev_err(atdma->dma_common.dev, 1130 "Only %d initial descriptors\n", i); 1131 break; 1132 } 1133 list_add_tail(&desc->desc_node, &tmp_list); 1134 } 1135 1136 spin_lock_irqsave(&atchan->lock, flags); 1137 atchan->descs_allocated = i; 1138 list_splice(&tmp_list, &atchan->free_list); 1139 dma_cookie_init(chan); 1140 spin_unlock_irqrestore(&atchan->lock, flags); 1141 1142 /* channel parameters */ 1143 channel_writel(atchan, CFG, cfg); 1144 1145 dev_dbg(chan2dev(chan), 1146 "alloc_chan_resources: allocated %d descriptors\n", 1147 atchan->descs_allocated); 1148 1149 return atchan->descs_allocated; 1150 } 1151 1152 /** 1153 * atc_free_chan_resources - free all channel resources 1154 * @chan: DMA channel 1155 */ 1156 static void atc_free_chan_resources(struct dma_chan *chan) 1157 { 1158 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1159 struct at_dma *atdma = to_at_dma(chan->device); 1160 struct at_desc *desc, *_desc; 1161 LIST_HEAD(list); 1162 1163 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1164 atchan->descs_allocated); 1165 1166 /* ASSERT: channel is idle */ 1167 BUG_ON(!list_empty(&atchan->active_list)); 1168 BUG_ON(!list_empty(&atchan->queue)); 1169 BUG_ON(atc_chan_is_enabled(atchan)); 1170 1171 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1172 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1173 list_del(&desc->desc_node); 1174 /* free link descriptor */ 1175 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1176 } 1177 list_splice_init(&atchan->free_list, &list); 1178 atchan->descs_allocated = 0; 1179 atchan->status = 0; 1180 1181 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1182 } 1183 1184 1185 /*-- Module Management -----------------------------------------------*/ 1186 1187 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */ 1188 static struct at_dma_platform_data at91sam9rl_config = { 1189 .nr_channels = 2, 1190 }; 1191 static struct at_dma_platform_data at91sam9g45_config = { 1192 .nr_channels = 8, 1193 }; 1194 1195 #if defined(CONFIG_OF) 1196 static const struct of_device_id atmel_dma_dt_ids[] = { 1197 { 1198 .compatible = "atmel,at91sam9rl-dma", 1199 .data = &at91sam9rl_config, 1200 }, { 1201 .compatible = "atmel,at91sam9g45-dma", 1202 .data = &at91sam9g45_config, 1203 }, { 1204 /* sentinel */ 1205 } 1206 }; 1207 1208 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); 1209 #endif 1210 1211 static const struct platform_device_id atdma_devtypes[] = { 1212 { 1213 .name = "at91sam9rl_dma", 1214 .driver_data = (unsigned long) &at91sam9rl_config, 1215 }, { 1216 .name = "at91sam9g45_dma", 1217 .driver_data = (unsigned long) &at91sam9g45_config, 1218 }, { 1219 /* sentinel */ 1220 } 1221 }; 1222 1223 static inline struct at_dma_platform_data * __init at_dma_get_driver_data( 1224 struct platform_device *pdev) 1225 { 1226 if (pdev->dev.of_node) { 1227 const struct of_device_id *match; 1228 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); 1229 if (match == NULL) 1230 return NULL; 1231 return match->data; 1232 } 1233 return (struct at_dma_platform_data *) 1234 platform_get_device_id(pdev)->driver_data; 1235 } 1236 1237 /** 1238 * at_dma_off - disable DMA controller 1239 * @atdma: the Atmel HDAMC device 1240 */ 1241 static void at_dma_off(struct at_dma *atdma) 1242 { 1243 dma_writel(atdma, EN, 0); 1244 1245 /* disable all interrupts */ 1246 dma_writel(atdma, EBCIDR, -1L); 1247 1248 /* confirm that all channels are disabled */ 1249 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) 1250 cpu_relax(); 1251 } 1252 1253 static int __init at_dma_probe(struct platform_device *pdev) 1254 { 1255 struct resource *io; 1256 struct at_dma *atdma; 1257 size_t size; 1258 int irq; 1259 int err; 1260 int i; 1261 struct at_dma_platform_data *plat_dat; 1262 1263 /* setup platform data for each SoC */ 1264 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1265 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); 1266 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); 1267 1268 /* get DMA parameters from controller type */ 1269 plat_dat = at_dma_get_driver_data(pdev); 1270 if (!plat_dat) 1271 return -ENODEV; 1272 1273 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1274 if (!io) 1275 return -EINVAL; 1276 1277 irq = platform_get_irq(pdev, 0); 1278 if (irq < 0) 1279 return irq; 1280 1281 size = sizeof(struct at_dma); 1282 size += plat_dat->nr_channels * sizeof(struct at_dma_chan); 1283 atdma = kzalloc(size, GFP_KERNEL); 1284 if (!atdma) 1285 return -ENOMEM; 1286 1287 /* discover transaction capabilities */ 1288 atdma->dma_common.cap_mask = plat_dat->cap_mask; 1289 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; 1290 1291 size = resource_size(io); 1292 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1293 err = -EBUSY; 1294 goto err_kfree; 1295 } 1296 1297 atdma->regs = ioremap(io->start, size); 1298 if (!atdma->regs) { 1299 err = -ENOMEM; 1300 goto err_release_r; 1301 } 1302 1303 atdma->clk = clk_get(&pdev->dev, "dma_clk"); 1304 if (IS_ERR(atdma->clk)) { 1305 err = PTR_ERR(atdma->clk); 1306 goto err_clk; 1307 } 1308 clk_enable(atdma->clk); 1309 1310 /* force dma off, just in case */ 1311 at_dma_off(atdma); 1312 1313 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); 1314 if (err) 1315 goto err_irq; 1316 1317 platform_set_drvdata(pdev, atdma); 1318 1319 /* create a pool of consistent memory blocks for hardware descriptors */ 1320 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", 1321 &pdev->dev, sizeof(struct at_desc), 1322 4 /* word alignment */, 0); 1323 if (!atdma->dma_desc_pool) { 1324 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1325 err = -ENOMEM; 1326 goto err_pool_create; 1327 } 1328 1329 /* clear any pending interrupt */ 1330 while (dma_readl(atdma, EBCISR)) 1331 cpu_relax(); 1332 1333 /* initialize channels related values */ 1334 INIT_LIST_HEAD(&atdma->dma_common.channels); 1335 for (i = 0; i < plat_dat->nr_channels; i++) { 1336 struct at_dma_chan *atchan = &atdma->chan[i]; 1337 1338 atchan->chan_common.device = &atdma->dma_common; 1339 dma_cookie_init(&atchan->chan_common); 1340 list_add_tail(&atchan->chan_common.device_node, 1341 &atdma->dma_common.channels); 1342 1343 atchan->ch_regs = atdma->regs + ch_regs(i); 1344 spin_lock_init(&atchan->lock); 1345 atchan->mask = 1 << i; 1346 1347 INIT_LIST_HEAD(&atchan->active_list); 1348 INIT_LIST_HEAD(&atchan->queue); 1349 INIT_LIST_HEAD(&atchan->free_list); 1350 1351 tasklet_init(&atchan->tasklet, atc_tasklet, 1352 (unsigned long)atchan); 1353 atc_enable_chan_irq(atdma, i); 1354 } 1355 1356 /* set base routines */ 1357 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1358 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1359 atdma->dma_common.device_tx_status = atc_tx_status; 1360 atdma->dma_common.device_issue_pending = atc_issue_pending; 1361 atdma->dma_common.dev = &pdev->dev; 1362 1363 /* set prep routines based on capability */ 1364 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1365 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1366 1367 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1368 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1369 /* controller can do slave DMA: can trigger cyclic transfers */ 1370 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1371 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1372 atdma->dma_common.device_control = atc_control; 1373 } 1374 1375 dma_writel(atdma, EN, AT_DMA_ENABLE); 1376 1377 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1378 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1379 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1380 plat_dat->nr_channels); 1381 1382 dma_async_device_register(&atdma->dma_common); 1383 1384 return 0; 1385 1386 err_pool_create: 1387 platform_set_drvdata(pdev, NULL); 1388 free_irq(platform_get_irq(pdev, 0), atdma); 1389 err_irq: 1390 clk_disable(atdma->clk); 1391 clk_put(atdma->clk); 1392 err_clk: 1393 iounmap(atdma->regs); 1394 atdma->regs = NULL; 1395 err_release_r: 1396 release_mem_region(io->start, size); 1397 err_kfree: 1398 kfree(atdma); 1399 return err; 1400 } 1401 1402 static int __exit at_dma_remove(struct platform_device *pdev) 1403 { 1404 struct at_dma *atdma = platform_get_drvdata(pdev); 1405 struct dma_chan *chan, *_chan; 1406 struct resource *io; 1407 1408 at_dma_off(atdma); 1409 dma_async_device_unregister(&atdma->dma_common); 1410 1411 dma_pool_destroy(atdma->dma_desc_pool); 1412 platform_set_drvdata(pdev, NULL); 1413 free_irq(platform_get_irq(pdev, 0), atdma); 1414 1415 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1416 device_node) { 1417 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1418 1419 /* Disable interrupts */ 1420 atc_disable_chan_irq(atdma, chan->chan_id); 1421 tasklet_disable(&atchan->tasklet); 1422 1423 tasklet_kill(&atchan->tasklet); 1424 list_del(&chan->device_node); 1425 } 1426 1427 clk_disable(atdma->clk); 1428 clk_put(atdma->clk); 1429 1430 iounmap(atdma->regs); 1431 atdma->regs = NULL; 1432 1433 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1434 release_mem_region(io->start, resource_size(io)); 1435 1436 kfree(atdma); 1437 1438 return 0; 1439 } 1440 1441 static void at_dma_shutdown(struct platform_device *pdev) 1442 { 1443 struct at_dma *atdma = platform_get_drvdata(pdev); 1444 1445 at_dma_off(platform_get_drvdata(pdev)); 1446 clk_disable(atdma->clk); 1447 } 1448 1449 static int at_dma_prepare(struct device *dev) 1450 { 1451 struct platform_device *pdev = to_platform_device(dev); 1452 struct at_dma *atdma = platform_get_drvdata(pdev); 1453 struct dma_chan *chan, *_chan; 1454 1455 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1456 device_node) { 1457 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1458 /* wait for transaction completion (except in cyclic case) */ 1459 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1460 return -EAGAIN; 1461 } 1462 return 0; 1463 } 1464 1465 static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1466 { 1467 struct dma_chan *chan = &atchan->chan_common; 1468 1469 /* Channel should be paused by user 1470 * do it anyway even if it is not done already */ 1471 if (!atc_chan_is_paused(atchan)) { 1472 dev_warn(chan2dev(chan), 1473 "cyclic channel not paused, should be done by channel user\n"); 1474 atc_control(chan, DMA_PAUSE, 0); 1475 } 1476 1477 /* now preserve additional data for cyclic operations */ 1478 /* next descriptor address in the cyclic list */ 1479 atchan->save_dscr = channel_readl(atchan, DSCR); 1480 1481 vdbg_dump_regs(atchan); 1482 } 1483 1484 static int at_dma_suspend_noirq(struct device *dev) 1485 { 1486 struct platform_device *pdev = to_platform_device(dev); 1487 struct at_dma *atdma = platform_get_drvdata(pdev); 1488 struct dma_chan *chan, *_chan; 1489 1490 /* preserve data */ 1491 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1492 device_node) { 1493 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1494 1495 if (atc_chan_is_cyclic(atchan)) 1496 atc_suspend_cyclic(atchan); 1497 atchan->save_cfg = channel_readl(atchan, CFG); 1498 } 1499 atdma->save_imr = dma_readl(atdma, EBCIMR); 1500 1501 /* disable DMA controller */ 1502 at_dma_off(atdma); 1503 clk_disable(atdma->clk); 1504 return 0; 1505 } 1506 1507 static void atc_resume_cyclic(struct at_dma_chan *atchan) 1508 { 1509 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1510 1511 /* restore channel status for cyclic descriptors list: 1512 * next descriptor in the cyclic list at the time of suspend */ 1513 channel_writel(atchan, SADDR, 0); 1514 channel_writel(atchan, DADDR, 0); 1515 channel_writel(atchan, CTRLA, 0); 1516 channel_writel(atchan, CTRLB, 0); 1517 channel_writel(atchan, DSCR, atchan->save_dscr); 1518 dma_writel(atdma, CHER, atchan->mask); 1519 1520 /* channel pause status should be removed by channel user 1521 * We cannot take the initiative to do it here */ 1522 1523 vdbg_dump_regs(atchan); 1524 } 1525 1526 static int at_dma_resume_noirq(struct device *dev) 1527 { 1528 struct platform_device *pdev = to_platform_device(dev); 1529 struct at_dma *atdma = platform_get_drvdata(pdev); 1530 struct dma_chan *chan, *_chan; 1531 1532 /* bring back DMA controller */ 1533 clk_enable(atdma->clk); 1534 dma_writel(atdma, EN, AT_DMA_ENABLE); 1535 1536 /* clear any pending interrupt */ 1537 while (dma_readl(atdma, EBCISR)) 1538 cpu_relax(); 1539 1540 /* restore saved data */ 1541 dma_writel(atdma, EBCIER, atdma->save_imr); 1542 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1543 device_node) { 1544 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1545 1546 channel_writel(atchan, CFG, atchan->save_cfg); 1547 if (atc_chan_is_cyclic(atchan)) 1548 atc_resume_cyclic(atchan); 1549 } 1550 return 0; 1551 } 1552 1553 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1554 .prepare = at_dma_prepare, 1555 .suspend_noirq = at_dma_suspend_noirq, 1556 .resume_noirq = at_dma_resume_noirq, 1557 }; 1558 1559 static struct platform_driver at_dma_driver = { 1560 .remove = __exit_p(at_dma_remove), 1561 .shutdown = at_dma_shutdown, 1562 .id_table = atdma_devtypes, 1563 .driver = { 1564 .name = "at_hdmac", 1565 .pm = &at_dma_dev_pm_ops, 1566 .of_match_table = of_match_ptr(atmel_dma_dt_ids), 1567 }, 1568 }; 1569 1570 static int __init at_dma_init(void) 1571 { 1572 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1573 } 1574 subsys_initcall(at_dma_init); 1575 1576 static void __exit at_dma_exit(void) 1577 { 1578 platform_driver_unregister(&at_dma_driver); 1579 } 1580 module_exit(at_dma_exit); 1581 1582 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); 1583 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); 1584 MODULE_LICENSE("GPL"); 1585 MODULE_ALIAS("platform:at_hdmac"); 1586