/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_    / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is set in the descriptor flags at creation time
 * to make the initial allocation more convenient. This bit will be cleared
 * and control will be given to the client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in the initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build a chain by adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
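
/*
 * Descriptor lifecycle summary (as implemented above):
 *
 *  - atc_alloc_descriptor() carves a hardware LLI plus bookkeeping structure
 *    out of the DMA pool, pre-ACKed so it can sit on the channel free_list.
 *  - atc_desc_get() hands out an ACKed descriptor from free_list, falling
 *    back to an extra GFP_ATOMIC allocation when the pool runs dry.
 *  - atc_desc_chain() links descriptors together: the software chain lives
 *    in first->tx_list, while the hardware follows prev->lli.dscr.
 *  - atc_desc_put() returns a whole chain (head and children) to free_list
 *    once the transfer is done or aborted.
 */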

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
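
/*
 * Completion is driven from the tasklet with atchan->lock held:
 * atc_advance_work() below either completes everything at once when the
 * active list is exhausted (atc_complete_all(), which also kicks any
 * queued descriptors) or retires only the first active chain and
 * restarts the engine on the next one.
 */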

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Also submit queued descriptors, if any.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			" cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
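
/*
 * Interrupt handling summary: at_dma_interrupt() only acknowledges the
 * controller (reading EBCISR clears the flags, which is why probe and
 * start paths poll it to flush stale state), disables a channel on AHB
 * error via CHDR, records ATC_IS_ERROR and defers all list manipulation
 * to the per-channel tasklet. atc_tasklet() then dispatches to error,
 * cyclic or normal completion handling under atchan->lock.
 */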


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue the chain if the DMA engine is already busy.
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation destination address (DMA address)
 * @src: operation source address (DMA address)
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	u32		ctrla;
	unsigned int	reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
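
/*
 * A cyclic transfer differs from a scatter-gather one in two ways: the last
 * LLI points back to the first (prev->lli.dscr = first->txd.phys above)
 * instead of getting an end-of-link marker, and the channel is flagged
 * ATC_IS_CYCLIC so that each buffer-transfer-complete interrupt is routed
 * to atc_handle_cyclic(), which only fires the client callback and never
 * recycles the descriptors until the transfer is terminated.
 */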


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else {
		return -ENXIO;
	}

	return 0;
}
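
/*
 * Status reporting: atc_tx_status() below first checks the cookie against
 * the channel's completed/used counters; if the transfer is not known to
 * be complete it scavenges finished descriptors from active_list
 * (atc_cleanup_descriptors()) and checks again. It also reports the first
 * active chain's length as residue and DMA_PAUSED when the channel is
 * suspended.
 */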

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL, updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
			atc_first_active(atchan)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan))
		atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/
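
/*
 * The controller is instantiated either from a legacy platform device
 * (see atdma_devtypes below) or from a device tree node matched against
 * atmel_dma_dt_ids. An illustrative node is sketched here; the unit
 * address and interrupt specifier are SoC/board specific and only shown
 * as an example:
 *
 *	dma: dma-controller@ffffec00 {
 *		compatible = "atmel,at91sam9g45-dma";
 *		reg = <0xffffec00 0x200>;
 *		interrupts = <21 4 0>;
 *	};
 */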

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channel-related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(atdma);
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* The channel should already have been paused by its user;
	 * if it has not, do it here and warn */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");