/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_data_direction direction)
{
	u32	ctrla;
	unsigned int reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_TO_DEVICE:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_FROM_DEVICE:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
					period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
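		/*
		 * Pause: set this channel's SUSP bit through the CHER
		 * register; DMA_RESUME below lifts it again by writing
		 * the channel's RES bit to CHDR.
		 */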
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
			atc_first_active(atchan)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  pdata->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");