/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
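/*
 * Descriptor lifecycle, summarized from the code below (editorial
 * note): a desc starts out on the DMAC's desc_pool; pl330_get_desc()
 * plucks it and marks it PREP for a prep_* callback; pl330_tx_submit()
 * queues it on the channel's work_list; fill_queue() pushes it to the
 * PL330 core and marks it BUSY; the core's completion callback marks
 * it DONE; pl330_tasklet() then runs the client callback and splices
 * it back onto desc_pool for reuse.
 */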
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a request immediately after the last completed one */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
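/*
 * Completion path, in summary (editorial note): the DMAC interrupt
 * lands in pl330_irq_handler() below, which calls pl330_update() in
 * the PL330 core; the core invokes dma_pl330_rqcb() for each finished
 * request, marking the desc DONE and scheduling pl330_tasklet(); the
 * tasklet collects DONE descs, refills the hardware queue via
 * fill_queue(), and runs the clients' callbacks outside the channel
 * lock through free_desc_list().
 */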
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	/* tasklet_kill() may spin waiting on a running pl330_tasklet(),
	 * which itself takes pch->lock, so it must not be called with
	 * the lock held.
	 */
	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}
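/*
 * How a client drives these ops through the dmaengine API, as a
 * minimal sketch (editorial illustration only, not part of the
 * driver; dst/src/len are assumed to be a mapped dma_addr_t pair
 * and a size_t, and error handling is omitted):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);		   (pl330_tx_submit)
 *	dma_async_issue_pending(chan);		   (pl330_issue_pending)
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)		   (polls pl330_tx_status)
 *		cpu_relax();
 *
 *	dma_release_channel(chan);
 */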
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
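/*
 * Worked example (editorial note): if prep_slave_sg() built three
 * descriptors d1, d2, d3, it returns &d3->txd, with d1 and d2 hanging
 * off d3->node. Given chan->cookie == 7, the loop above moves d1 and
 * d2 to the work_list with cookies 8 and 9, then d3 gets cookie 10,
 * which also becomes the new chan->cookie and the return value. The
 * "< 0" checks merely wrap the cookie back to 1 on signed overflow.
 */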
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = peri->peri_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
		desc->req.peri = 0;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those
	 * that can be programmed with 256 bytes of MC buffer, but
	 * considering a req size is seldom going to be word-unaligned
	 * and more than 200MB, we take it easy.
	 * Also, should the limit be reached we'd rather have the
	 * platform increase the MC buffer size than complicate this
	 * API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
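/*
 * Worked example for the burst parameters (editorial note): for a
 * 4096-byte memcpy on a DMAC with a 64-bit data bus and a 16-deep
 * data buffer, pl330_prep_dma_memcpy() below picks the widest beat
 * that divides the length: burst = 8 bytes, hence brst_size = 3
 * (1 << 3 bytes per beat). get_burst_len() above then starts from
 * (8 * 16) >> 3 = 16 beats, which is already the hardware cap and
 * divides the length, so each burst moves 16 * 8 = 128 bytes.
 */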
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i, burst_size;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = peri->fifo_addr;
	burst_size = peri->burst_sz;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = burst_size;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
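/*
 * A board wires peripheral channels to the DMAC through platform
 * data. A minimal sketch (editorial illustration, not part of the
 * driver; names and values are made up, and only the fields this
 * driver actually reads are shown):
 *
 *	static struct dma_pl330_peri pdma_peri[] = {
 *		[0] = {
 *			.peri_id = 0,
 *			.rqtype = DEVTOMEM,
 *			.fifo_addr = 0x13800024,    (device RX FIFO)
 *			.burst_sz = 2,              (1 << 2 = 4-byte beats)
 *		},
 *	};
 *
 *	static struct dma_pl330_platdata pdma_pdata = {
 *		.nr_valid_peri = ARRAY_SIZE(pdma_peri),
 *		.peri = pdma_peri,
 *		.mcbuf_sz = 0,	(0 lets the core pick a default)
 *	};
 */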
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pdat ? pdat->nr_valid_peri : 0,
			pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		/* Only the first nr_valid_peri entries of pdat->peri[] exist */
		if (pdat && i < pdat->nr_valid_peri) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	amba_set_drvdata(adev, pdmac);

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac->peripherals);
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

	kfree(pdmac->peripherals);
	kfree(pdmac);

	return 0;
}
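/*
 * Match on the AMBA peripheral ID: part number 0x330, designer 0x41
 * (ARM Ltd.). The mask covers only bits [19:0], so the revision and
 * configuration fields are ignored and any PL330 revision binds.
 */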
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");