/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <mach/clk.h>
#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
/*
 * If any burst is in flight while the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};

/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware
 * for one transfer. A client's data transfer request may be broken into
 * multiple sub-transfers, according to the requester's details and
 * hardware support. Each sub-transfer is added to the list of transfers
 * and points to the Tegra DMA descriptor that manages the overall
 * transfer.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages a client request and
 * keeps track of the transfer status, callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	struct tegra_dma_chip_data	*chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}
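/*
 * Per-channel registers occupy TEGRA_APBDMA_CHANNEL_REGISTER_SIZE (0x20)
 * bytes per channel, starting at TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET
 * (0x1000) from the controller base. For example, channel 3's CSR register
 * sits at base + 0x1000 + 3 * 0x20 + 0x00 = base + 0x1060.
 */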
static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a descriptor that is still waiting for its ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
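/*
 * Pausing is done by clearing the controller-wide enable bit, which stops
 * all channels at once. tdma->global_lock is held from pause to resume so
 * that a concurrent pause on another channel cannot re-enable the
 * controller underneath us.
 */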
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If the EOC interrupt status is not set, the last burst has not
	 * completed yet. The last burst may also be in flight: it can then
	 * complete while the DMA is paused, in which case it neither raises
	 * an interrupt nor reloads the new configuration.
	 * If the EOC status is already set, the interrupt handler has to
	 * load the new configuration instead.
	 */
	tegra_dma_global_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing; the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_global_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_global_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_del(&sgreq->node);
		list_add_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
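/*
 * In continuous (cyclic) mode the next request must already be configured
 * in hardware before the current one completes. If the head request was
 * never configured, the hardware has wrapped onto a stale buffer, so the
 * only safe recovery is to stop the channel and abort all requests.
 */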
static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer, as
	 * the transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* A callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_del(&sgreq->node);
		list_add_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
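/*
 * The hard IRQ handler only acknowledges the interrupt and advances the
 * hardware; client callbacks are deferred to the tasklet above so that
 * they run with tdc->lock released. cb_count lets one cb_desc entry carry
 * several completions when interrupts coalesce.
 */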
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_global_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_global_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}
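	/*
	 * Residue: in cyclic mode bytes_transferred keeps growing by one
	 * period per completion, so take it modulo bytes_requested before
	 * subtracting it from the total request size.
	 */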
	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bus width is not supported, using 32 bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}
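/*
 * Example: with a 32-bit slave (slave_bw = 4 bytes) and burst_size = 8
 * words, burst_byte = 32 and burst_ahb_width = 8 AHB words, which selects
 * TEGRA_APBDMA_AHBSEQ_BURST_8 below.
 */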
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in units of the slave bus width;
	 * convert it to AHB bus words, which are 4 bytes each.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
}

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

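		/*
		 * The controller moves whole 32-bit words: the length and
		 * the memory address must be word aligned, and one request
		 * may not exceed max_dma_count (64 KiB on Tegra20/Tegra30).
		 */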
		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		/* The CSR word count field encodes (len - 4), low two bits clear */
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can be queued only after terminating it.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * Cyclic transfers are only supported when buf_len is a multiple
	 * of period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not a multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-size pieces */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	dma_desc->txd.flags = 0;
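	/*
	 * In cyclic mode the period list is never drained: when a period
	 * completes, handle_cont_sngl_cycle_dma_done() rotates its sg_req
	 * to the tail of pending_sg_req, so the hardware keeps looping
	 * over the buffer until the transfer is terminated.
	 */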
	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}

/* Tegra20 specific DMA controller information */
static struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
};

static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
	{
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif

static int __devinit tegra_dma_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If there is no device tree, fall back to Tegra20 */
		cdata = &tegra20_dma_chip_data;
	}
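	/*
	 * The controller structure and all channel structures are allocated
	 * in one block: channels[0] at the end of struct tegra_dma acts as
	 * a flexible array sized by the chip's nr_channels.
	 */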
	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!tdma->base_addr) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		return -EADDRNOTAVAIL;
	}

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		char irq_name[30];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, irq_name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d for channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int __devexit tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner = THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= __devexit_p(tegra_dma_remove),
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");