/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <mach/clk.h>
#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/*
 * If any burst is in flight when the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and for the DMA
 * status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This contains the details for one transfer to program the DMA hardware.
 * The client's data transfer request can be broken into multiple
 * sub-transfers as per the requester details and hardware support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of ISR handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a desc which is still waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no EOC status, the last burst has not completed yet.
	 * The last burst may be in flight and complete later, but because
	 * the DMA is paused it will neither generate an interrupt nor
	 * reload the new configuration.
	 * If the EOC status is already set, the interrupt handler needs to
	 * load the new configuration.
	 */
	tegra_dma_global_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing, as the ISR will
	 * handle the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_global_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_global_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Dma is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight, abort the transfer, as the transfer
	 * loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
	return;
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
	return;
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

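	/* EOC status not set: the interrupt was already served or is spurious */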
	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for 1 burst time to configure the DMA for
			 * the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}

static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_global_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_global_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus_width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We allow more requests to be queued as long as the DMA has not
	 * been started. The driver will loop over all the requests.
	 * Once the DMA is started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfer when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-sized pieces */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	dma_desc->txd.flags = 0;

	/*
	 * Make sure that the requested mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels	= 16,
	.max_dma_count	= 1024UL * 64,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels	= 32,
	.max_dma_count	= 1024UL * 64,
};

static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
	{
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If there is no device tree then fall back to Tegra20 */
		cdata = &tegra20_dma_chip_data;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!tdma->base_addr) {
		dev_err(&pdev->dev,
			"Cannot request memregion/iomap dma address\n");
		return -EADDRNOTAVAIL;
	}

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int __devexit tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner = THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");