/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This driver supports the Atmel AHB DMA Controller. It has currently
 * been tested with the Atmel AT91SAM9RL and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain by adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submits queued descriptors, if any.
 *
 * Assumes the channel is idle when this function is called.
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*-- IRQ & Tasklet ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*-- DMA Engine API --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
"TO DEVICE" : "FROM DEVICE", 864 buf_addr, 865 periods, buf_len, period_len); 866 867 if (unlikely(!atslave || !buf_len || !period_len)) { 868 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 869 return NULL; 870 } 871 872 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 873 if (was_cyclic) { 874 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 875 return NULL; 876 } 877 878 if (sconfig->direction == DMA_MEM_TO_DEV) 879 reg_width = convert_buswidth(sconfig->dst_addr_width); 880 else 881 reg_width = convert_buswidth(sconfig->src_addr_width); 882 883 /* Check for too big/unaligned periods and unaligned DMA buffer */ 884 if (atc_dma_cyclic_check_values(reg_width, buf_addr, 885 period_len, direction)) 886 goto err_out; 887 888 /* build cyclic linked list */ 889 for (i = 0; i < periods; i++) { 890 struct at_desc *desc; 891 892 desc = atc_desc_get(atchan); 893 if (!desc) 894 goto err_desc_get; 895 896 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 897 reg_width, period_len, direction)) 898 goto err_desc_get; 899 900 atc_desc_chain(&first, &prev, desc); 901 } 902 903 /* lets make a cyclic list */ 904 prev->lli.dscr = first->txd.phys; 905 906 /* First descriptor of the chain embedds additional information */ 907 first->txd.cookie = -EBUSY; 908 first->len = buf_len; 909 910 return &first->txd; 911 912 err_desc_get: 913 dev_err(chan2dev(chan), "not enough descriptors available\n"); 914 atc_desc_put(atchan, first); 915 err_out: 916 clear_bit(ATC_IS_CYCLIC, &atchan->status); 917 return NULL; 918 } 919 920 static int set_runtime_config(struct dma_chan *chan, 921 struct dma_slave_config *sconfig) 922 { 923 struct at_dma_chan *atchan = to_at_dma_chan(chan); 924 925 /* Check if it is chan is configured for slave transfers */ 926 if (!chan->private) 927 return -EINVAL; 928 929 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 930 931 convert_burst(&atchan->dma_sconfig.src_maxburst); 932 convert_burst(&atchan->dma_sconfig.dst_maxburst); 933 934 return 0; 935 } 936 937 938 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 939 unsigned long arg) 940 { 941 struct at_dma_chan *atchan = to_at_dma_chan(chan); 942 struct at_dma *atdma = to_at_dma(chan->device); 943 int chan_id = atchan->chan_common.chan_id; 944 unsigned long flags; 945 946 LIST_HEAD(list); 947 948 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 949 950 if (cmd == DMA_PAUSE) { 951 spin_lock_irqsave(&atchan->lock, flags); 952 953 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 954 set_bit(ATC_IS_PAUSED, &atchan->status); 955 956 spin_unlock_irqrestore(&atchan->lock, flags); 957 } else if (cmd == DMA_RESUME) { 958 if (!atc_chan_is_paused(atchan)) 959 return 0; 960 961 spin_lock_irqsave(&atchan->lock, flags); 962 963 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 964 clear_bit(ATC_IS_PAUSED, &atchan->status); 965 966 spin_unlock_irqrestore(&atchan->lock, flags); 967 } else if (cmd == DMA_TERMINATE_ALL) { 968 struct at_desc *desc, *_desc; 969 /* 970 * This is only called when something went wrong elsewhere, so 971 * we don't really care about the data. Just disable the 972 * channel. We still have to poll the channel enable bit due 973 * to AHB/HSB limitations. 
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan))
		atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*-- Module Management -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
"slave " : "", 1377 plat_dat->nr_channels); 1378 1379 dma_async_device_register(&atdma->dma_common); 1380 1381 return 0; 1382 1383 err_pool_create: 1384 platform_set_drvdata(pdev, NULL); 1385 free_irq(platform_get_irq(pdev, 0), atdma); 1386 err_irq: 1387 clk_disable(atdma->clk); 1388 clk_put(atdma->clk); 1389 err_clk: 1390 iounmap(atdma->regs); 1391 atdma->regs = NULL; 1392 err_release_r: 1393 release_mem_region(io->start, size); 1394 err_kfree: 1395 kfree(atdma); 1396 return err; 1397 } 1398 1399 static int __exit at_dma_remove(struct platform_device *pdev) 1400 { 1401 struct at_dma *atdma = platform_get_drvdata(pdev); 1402 struct dma_chan *chan, *_chan; 1403 struct resource *io; 1404 1405 at_dma_off(atdma); 1406 dma_async_device_unregister(&atdma->dma_common); 1407 1408 dma_pool_destroy(atdma->dma_desc_pool); 1409 platform_set_drvdata(pdev, NULL); 1410 free_irq(platform_get_irq(pdev, 0), atdma); 1411 1412 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1413 device_node) { 1414 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1415 1416 /* Disable interrupts */ 1417 atc_disable_chan_irq(atdma, chan->chan_id); 1418 tasklet_disable(&atchan->tasklet); 1419 1420 tasklet_kill(&atchan->tasklet); 1421 list_del(&chan->device_node); 1422 } 1423 1424 clk_disable(atdma->clk); 1425 clk_put(atdma->clk); 1426 1427 iounmap(atdma->regs); 1428 atdma->regs = NULL; 1429 1430 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1431 release_mem_region(io->start, resource_size(io)); 1432 1433 kfree(atdma); 1434 1435 return 0; 1436 } 1437 1438 static void at_dma_shutdown(struct platform_device *pdev) 1439 { 1440 struct at_dma *atdma = platform_get_drvdata(pdev); 1441 1442 at_dma_off(platform_get_drvdata(pdev)); 1443 clk_disable(atdma->clk); 1444 } 1445 1446 static int at_dma_prepare(struct device *dev) 1447 { 1448 struct platform_device *pdev = to_platform_device(dev); 1449 struct at_dma *atdma = platform_get_drvdata(pdev); 1450 struct dma_chan *chan, *_chan; 1451 1452 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1453 device_node) { 1454 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1455 /* wait for transaction completion (except in cyclic case) */ 1456 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1457 return -EAGAIN; 1458 } 1459 return 0; 1460 } 1461 1462 static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1463 { 1464 struct dma_chan *chan = &atchan->chan_common; 1465 1466 /* Channel should be paused by user 1467 * do it anyway even if it is not done already */ 1468 if (!atc_chan_is_paused(atchan)) { 1469 dev_warn(chan2dev(chan), 1470 "cyclic channel not paused, should be done by channel user\n"); 1471 atc_control(chan, DMA_PAUSE, 0); 1472 } 1473 1474 /* now preserve additional data for cyclic operations */ 1475 /* next descriptor address in the cyclic list */ 1476 atchan->save_dscr = channel_readl(atchan, DSCR); 1477 1478 vdbg_dump_regs(atchan); 1479 } 1480 1481 static int at_dma_suspend_noirq(struct device *dev) 1482 { 1483 struct platform_device *pdev = to_platform_device(dev); 1484 struct at_dma *atdma = platform_get_drvdata(pdev); 1485 struct dma_chan *chan, *_chan; 1486 1487 /* preserve data */ 1488 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1489 device_node) { 1490 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1491 1492 if (atc_chan_is_cyclic(atchan)) 1493 atc_suspend_cyclic(atchan); 1494 atchan->save_cfg = channel_readl(atchan, CFG); 1495 } 1496 atdma->save_imr = dma_readl(atdma, EBCIMR); 1497 1498 /* 
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");