// SPDX-License-Identifier: GPL-2.0-only
/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

/* Per-channel register offsets (relative to the channel page) and fields */
#define ADMA_CH_CMD					0x00
#define ADMA_CH_STATUS					0x0c
#define ADMA_CH_STATUS_XFER_EN				BIT(0)
#define ADMA_CH_STATUS_XFER_PAUSED			BIT(1)

#define ADMA_CH_INT_STATUS				0x10
#define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)

#define ADMA_CH_INT_CLEAR				0x1c
#define ADMA_CH_CTRL					0x24
/* Direction/mode field positions vary per chip, so shifts come from cdata */
#define ADMA_CH_CTRL_DIR(val, mask, shift)		(((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM			2
#define ADMA_CH_CTRL_DIR_MEM2AHUB			4
#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift)		(2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT			0

#define ADMA_CH_CONFIG					0x28
#define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT			20
#define ADMA_CH_CONFIG_MAX_BURST_SIZE			16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS				8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs)	((reqs) << 4)

/* Tegra264 moves WRR weight / outstanding reqs into a global per-channel reg */
#define ADMA_GLOBAL_CH_CONFIG				0x400
#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val)	((val) & 0x7)
#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs)	((reqs) << 8)

/* Global "page" routing registers: map channel groups / requests to a page */
#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP			0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ		0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ		0x84
#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0		0x44
#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1		0x48
#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0		0x100
#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1		0x104
#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0		0x180
#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1		0x184
#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET		0x8

#define ADMA_CH_FIFO_CTRL				0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT			8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT			0
#define ADMA_GLOBAL_CH_FIFO_CTRL			0x300

#define ADMA_CH_LOWER_SRC_ADDR				0x34
#define ADMA_CH_LOWER_TRG_ADDR				0x3c
#define ADMA_CH_TC					0x44
#define ADMA_CH_TC_COUNT_MASK				0x3ffffffc

#define ADMA_CH_XFER_STATUS				0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff

#define ADMA_GLOBAL_CMD					0x00
#define ADMA_GLOBAL_SOFT_RESET				0x04

/* Worst-case time (us) for one burst to drain, used when polling for pause */
#define TEGRA_ADMA_BURST_COMPLETE_TIME			20

#define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & mask) << shift)

struct tegra_adma;

/*
 * struct tegra_adma_chip_data - Tegra chip specific data
 * @adma_get_burst_config: Function callback used to set DMA burst size.
 * @global_reg_offset: Register offset of DMA global register.
 * @global_int_clear: Register offset of DMA global interrupt clear.
 * @global_ch_fifo_base: Global channel fifo ctrl base offset
 * @global_ch_config_base: Global channel config base offset
 * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
 * @ch_req_rx_shift: Register offset for AHUB receive channel select.
 * @ch_dir_shift: Channel direction bit position.
 * @ch_mode_shift: Channel mode bit position.
 * @ch_base_offset: Register offset of DMA channel registers.
 * @ch_tc_offset_diff: From TC register onwards offset differs for Tegra264
 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
 * @ch_config: Outstanding and WRR config values
 * @ch_req_mask: Mask for Tx or Rx channel select.
 * @ch_dir_mask: Mask for channel direction.
 * @ch_req_max: Maximum number of Tx or Rx channels available.
 * @ch_reg_size: Size of DMA channel register space.
 * @nr_channels: Number of DMA channels available.
 * @ch_fifo_size_mask: Mask for FIFO size field.
 * @sreq_index_offset: Slave channel index offset.
 * @max_page: Maximum ADMA Channel Page.
 * @set_global_pg_config: Global page programming.
 */
struct tegra_adma_chip_data {
	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
	unsigned int global_reg_offset;
	unsigned int global_int_clear;
	unsigned int global_ch_fifo_base;
	unsigned int global_ch_config_base;
	unsigned int ch_req_tx_shift;
	unsigned int ch_req_rx_shift;
	unsigned int ch_dir_shift;
	unsigned int ch_mode_shift;
	unsigned int ch_base_offset;
	unsigned int ch_tc_offset_diff;
	unsigned int ch_fifo_ctrl;
	unsigned int ch_config;
	unsigned int ch_req_mask;
	unsigned int ch_dir_mask;
	unsigned int ch_req_max;
	unsigned int ch_reg_size;
	unsigned int nr_channels;
	unsigned int ch_fifo_size_mask;
	unsigned int sreq_index_offset;
	unsigned int max_page;
	void (*set_global_pg_config)(struct tegra_adma *tdma);
};

/*
 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
 *
 * Shadow copy of the per-channel registers; programmed into hardware when a
 * transfer starts and saved/restored across runtime suspend.
 */
struct tegra_adma_chan_regs {
	unsigned int ctrl;
	unsigned int config;
	unsigned int global_config;
	unsigned int src_addr;
	unsigned int trg_addr;
	unsigned int fifo_ctrl;
	unsigned int cmd;
	unsigned int tc;
};

/*
 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
 */
struct tegra_adma_desc {
	struct virt_dma_desc vd;
	struct tegra_adma_chan_regs ch_regs;
	size_t buf_len;		/* total cyclic buffer length in bytes */
	size_t period_len;	/* length of one period in bytes */
	size_t num_periods;	/* buf_len / period_len */
};

/*
 * struct tegra_adma_chan - Tegra ADMA channel information
 */
struct tegra_adma_chan {
	struct virt_dma_chan vc;
	struct tegra_adma_desc *desc;	/* in-flight descriptor, NULL when idle */
	struct tegra_adma *tdma;
	int irq;
	void __iomem *chan_addr;	/* base of this channel's register page */

	/* Slave channel configuration info */
	struct dma_slave_config sconfig;
	enum dma_transfer_direction sreq_dir;
	unsigned int sreq_index;	/* ADMAIF request number (1-based, set via OF xlate) */
	bool sreq_reserved;
	struct tegra_adma_chan_regs ch_regs;

	/* Transfer count and position info */
	unsigned int tx_buf_count;
	unsigned int tx_buf_pos;

	/* Offsets of the global FIFO/config registers for this channel (0 if unused) */
	unsigned int global_ch_fifo_offset;
	unsigned int global_ch_config_offset;
};

/*
 * struct tegra_adma - Tegra ADMA controller information
 */
struct tegra_adma {
	struct dma_device dma_dev;
	struct device *dev;
	void __iomem *base_addr;	/* global register space (may be NULL on legacy binding) */
	void __iomem *ch_base_addr;	/* base of the channel page in use */
	struct clk *ahub_clk;
	unsigned int nr_channels;
	unsigned long *dma_chan_mask;	/* bitmap of usable (non-reserved) channels */
	unsigned long rx_requests_reserved;
	unsigned long tx_requests_reserved;

	/* Used to store global command register state when suspending */
	unsigned int global_cmd;
	unsigned int ch_page_no;

	const struct tegra_adma_chip_data *cdata;

	/* Last member of the structure */
	struct tegra_adma_chan channels[] __counted_by(nr_channels);
};

/* Write a DMA global register */
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

/* Read a DMA global register */
static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
	return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

/*
 * Write a global register relative to the channel page base; used when only
 * the "page" region is mapped (no full global aperture).
 */
static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
}

/* Write a per-channel register */
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

/* Read a per-channel register */
static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_adma_chan, vc.chan);
}

static inline struct tegra_adma_desc *to_tegra_adma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_adma_desc, vd.tx);
}

static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
{
	return tdc->tdma->dev;
}

/* virt-dma desc_free callback: releases a completed/terminated descriptor */
static void tegra_adma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_adma_desc, vd));
}

/* dmaengine device_config: latch the slave config for later prep calls */
static int tegra_adma_slave_config(struct dma_chan *dc,
				   struct dma_slave_config *sconfig)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));

	return 0;
}

static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
{
	/*
	 * Clear the default page1 channel group configs and program
	 * the global registers based on the actual page usage
	 */
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}

/* Tegra264 variant: page routing is split across _0/_1 register pairs */
static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
{
	u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;

	/* If the default page (page1) is not used, then clear page1 registers */
	if (tdma->ch_page_no) {
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
		tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
	}

	/* Program global registers for selected page */
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
	tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
}

/*
 * Reset and enable the controller. When only the channel page is mapped
 * (tdma->base_addr == NULL), a hypervisor owns the global registers and we
 * just clear our page's interrupts.
 */
static int tegra_adma_init(struct tegra_adma *tdma)
{
	u32 status;
	int ret;

	/* Clear any channels group global interrupts */
	tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);

	if (!tdma->base_addr)
		return 0;

	/* Assert soft reset */
	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

	/* Wait for reset to clear */
	ret = readx_poll_timeout(readl,
				 tdma->base_addr +
				 tdma->cdata->global_reg_offset +
				 ADMA_GLOBAL_SOFT_RESET,
				 status, status == 0, 20, 10000);
	if (ret)
		return ret;

	if (tdma->cdata->set_global_pg_config)
		tdma->cdata->set_global_pg_config(tdma);

	/* Enable global ADMA registers */
	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);

	return 0;
}

/*
 * Reserve the channel's ADMAIF request line for @direction. Each request
 * index may only be claimed once per direction across the controller.
 * Returns 0 on success or -EINVAL if the request is invalid/already taken.
 */
static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	if (sreq_index > tdma->cdata->ch_req_max) {
		dev_err(tdma->dev, "invalid DMA request\n");
		return -EINVAL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}

/* Release the request line reserved by tegra_adma_request_alloc() */
static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}

/* Return the XFER_DONE bit of the channel's interrupt status */
static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
{
	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);

	return status & ADMA_CH_INT_STATUS_XFER_DONE;
}

/* Acknowledge a pending XFER_DONE interrupt; returns the status that was set */
static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
{
	u32 status = tegra_adma_irq_status(tdc);

	if (status)
		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);

	return status;
}

/*
 * Stop the channel: disable it, ack interrupts, poll until the transfer
 * engine is idle, then hand the in-flight descriptor back to virt-dma.
 * Caller holds the channel's vc.lock.
 */
static void tegra_adma_stop(struct tegra_adma_chan *tdc)
{
	unsigned int status;

	/* Disable
 ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 0);

	/* Clear interrupt status */
	tegra_adma_irq_clear(tdc);

	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
				      status, !(status & ADMA_CH_STATUS_XFER_EN),
				      20, 10000)) {
		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
		return;
	}

	vchan_terminate_vdesc(&tdc->desc->vd);
	tdc->desc = NULL;
}

/* dmaengine device_synchronize: wait for virt-dma callbacks to finish */
static void tegra_adma_synchronize(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	vchan_synchronize(&tdc->vc);
}

/*
 * Program the next issued descriptor into the channel registers and kick
 * the transfer. Caller holds the channel's vc.lock.
 */
static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
	struct tegra_adma_chan_regs *ch_regs;
	struct tegra_adma_desc *desc;

	if (!vd)
		return;

	list_del(&vd->node);

	desc = to_tegra_adma_desc(&vd->tx);

	if (!desc) {
		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
		return;
	}

	ch_regs = &desc->ch_regs;

	tdc->tx_buf_pos = 0;
	tdc->tx_buf_count = 0;
	/* TC/addr registers sit ch_tc_offset_diff bytes lower on Tegra264 */
	tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
		      ch_regs->src_addr);
	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
		      ch_regs->trg_addr);

	/* FIFO ctrl lives either in the channel page or in the global space */
	if (!tdc->tdma->cdata->global_ch_fifo_base)
		tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
	else if (tdc->global_ch_fifo_offset)
		tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);

	if (tdc->global_ch_config_offset)
		tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);

	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);

	/* Start ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 1);

	tdc->desc = desc;
}

/*
 * Compute the residue (bytes left in the current period cycle) from the
 * hardware transfer-count register. Caller holds the channel's vc.lock.
 */
static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
	struct tegra_adma_desc *desc = tdc->desc;
	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
					tdc->tdma->cdata->ch_tc_offset_diff);
	unsigned int periods_remaining;

	/*
	 * Handle wrap around of buffer count register
	 */
	if (pos < tdc->tx_buf_pos)
		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
	else
		tdc->tx_buf_count += pos - tdc->tx_buf_pos;

	periods_remaining = tdc->tx_buf_count % desc->num_periods;
	tdc->tx_buf_pos = pos;

	return desc->buf_len - (periods_remaining * desc->period_len);
}

/* Per-channel IRQ handler: ack XFER_DONE and fire the cyclic callback */
static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;

	spin_lock(&tdc->vc.lock);

	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock(&tdc->vc.lock);
		return IRQ_NONE;
	}

	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}

/* dmaengine device_issue_pending: start the channel if it is idle */
static void tegra_adma_issue_pending(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (vchan_issue_pending(&tdc->vc)) {
		if (!tdc->desc)
			tegra_adma_start(tdc);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

/* Check the hardware pause status bit for this channel */
static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
{
	u32 csts;

	csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
	csts &= ADMA_CH_STATUS_XFER_PAUSED;

	return csts ?
true : false; 553 } 554 555 static int tegra_adma_pause(struct dma_chan *dc) 556 { 557 struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); 558 struct tegra_adma_desc *desc = tdc->desc; 559 struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; 560 int dcnt = 10; 561 562 ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); 563 ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT); 564 tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); 565 566 while (dcnt-- && !tegra_adma_is_paused(tdc)) 567 udelay(TEGRA_ADMA_BURST_COMPLETE_TIME); 568 569 if (dcnt < 0) { 570 dev_err(tdc2dev(tdc), "unable to pause DMA channel\n"); 571 return -EBUSY; 572 } 573 574 return 0; 575 } 576 577 static int tegra_adma_resume(struct dma_chan *dc) 578 { 579 struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); 580 struct tegra_adma_desc *desc = tdc->desc; 581 struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; 582 583 ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); 584 ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT); 585 tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); 586 587 return 0; 588 } 589 590 static int tegra_adma_terminate_all(struct dma_chan *dc) 591 { 592 struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); 593 unsigned long flags; 594 LIST_HEAD(head); 595 596 spin_lock_irqsave(&tdc->vc.lock, flags); 597 598 if (tdc->desc) 599 tegra_adma_stop(tdc); 600 601 tegra_adma_request_free(tdc); 602 vchan_get_all_descriptors(&tdc->vc, &head); 603 spin_unlock_irqrestore(&tdc->vc.lock, flags); 604 vchan_dma_desc_free_list(&tdc->vc, &head); 605 606 return 0; 607 } 608 609 static enum dma_status tegra_adma_tx_status(struct dma_chan *dc, 610 dma_cookie_t cookie, 611 struct dma_tx_state *txstate) 612 { 613 struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc); 614 struct tegra_adma_desc *desc; 615 struct virt_dma_desc *vd; 616 enum dma_status ret; 617 unsigned long flags; 618 unsigned int residual; 619 620 ret = dma_cookie_status(dc, cookie, txstate); 621 if (ret == DMA_COMPLETE || !txstate) 
622 return ret; 623 624 spin_lock_irqsave(&tdc->vc.lock, flags); 625 626 vd = vchan_find_desc(&tdc->vc, cookie); 627 if (vd) { 628 desc = to_tegra_adma_desc(&vd->tx); 629 residual = desc->ch_regs.tc; 630 } else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) { 631 residual = tegra_adma_get_residue(tdc); 632 } else { 633 residual = 0; 634 } 635 636 spin_unlock_irqrestore(&tdc->vc.lock, flags); 637 638 dma_set_residue(txstate, residual); 639 640 return ret; 641 } 642 643 static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size) 644 { 645 if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE) 646 burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE; 647 648 return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT; 649 } 650 651 static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size) 652 { 653 if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE) 654 burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE; 655 656 return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT; 657 } 658 659 static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, 660 struct tegra_adma_desc *desc, 661 dma_addr_t buf_addr, 662 enum dma_transfer_direction direction) 663 { 664 struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; 665 const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata; 666 unsigned int burst_size, adma_dir, fifo_size_shift; 667 668 if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS) 669 return -EINVAL; 670 671 switch (direction) { 672 case DMA_MEM_TO_DEV: 673 fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT; 674 adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; 675 burst_size = tdc->sconfig.dst_maxburst; 676 ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); 677 ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index, 678 cdata->ch_req_mask, 679 cdata->ch_req_tx_shift); 680 ch_regs->src_addr = buf_addr; 681 break; 682 683 case DMA_DEV_TO_MEM: 684 fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT; 685 adma_dir = 
 ADMA_CH_CTRL_DIR_AHUB2MEM;
		burst_size = tdc->sconfig.src_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_rx_shift);
		ch_regs->trg_addr = buf_addr;
		break;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
					  cdata->ch_dir_shift) |
			 ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
			 ADMA_CH_CTRL_FLOWCTRL_EN;
	ch_regs->config |= cdata->adma_get_burst_config(burst_size);

	/* WRR/outstanding-requests config is global on Tegra264, local before */
	if (cdata->global_ch_config_base)
		ch_regs->global_config |= cdata->ch_config;
	else
		ch_regs->config |= cdata->ch_config;

	/*
	 * 'sreq_index' represents the current ADMAIF channel number and as per
	 * HW recommendation its FIFO size should match with the corresponding
	 * ADMA channel.
	 *
	 * ADMA FIFO size is set as per below (based on default ADMAIF channel
	 * FIFO sizes):
	 *    fifo_size = 0x2 (sreq_index > sreq_index_offset)
	 *    fifo_size = 0x3 (sreq_index <= sreq_index_offset)
	 *
	 */
	if (tdc->sreq_index > cdata->sreq_index_offset)
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask,
					      fifo_size_shift);
	else
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
					      fifo_size_shift);

	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;

	return tegra_adma_request_alloc(tdc, direction);
}

/*
 * dmaengine device_prep_dma_cyclic: allocate and fill a cyclic descriptor.
 * buf_len must be a whole multiple of period_len, the buffer 4-byte aligned
 * and each period must fit in the TC count field. Returns NULL on error.
 */
static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}

/*
 * dmaengine device_alloc_chan_resources: grab the channel IRQ and take a
 * runtime-PM reference for the lifetime of the allocation.
 */
static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_resume_and_get(tdc2dev(tdc));
	if (ret < 0) {
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}

/*
 * dmaengine device_free_chan_resources: terminate, release IRQ/PM reference
 * and clear the slave-request binding established by OF xlate.
 */
static void tegra_adma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	tegra_adma_terminate_all(dc);
	vchan_free_chan_resources(&tdc->vc);
	tasklet_kill(&tdc->vc.task);
	free_irq(tdc->irq, tdc);
	pm_runtime_put(tdc2dev(tdc));

	tdc->sreq_index = 0;
	tdc->sreq_dir = DMA_TRANS_NONE;
}

/*
 * OF xlate: translate a one-cell DT DMA specifier (the ADMAIF request
 * number, must be non-zero) into a free channel.
 */
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_adma *tdma = ofdma->of_dma_data;
	struct tegra_adma_chan *tdc;
	struct dma_chan *chan;
	unsigned int sreq_index;

	if (dma_spec->args_count != 1)
		return NULL;

	sreq_index = dma_spec->args[0];

	if (sreq_index
== 0) {
		dev_err(tdma->dev, "DMA request must not be 0\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_adma_chan(chan);
	tdc->sreq_index = sreq_index;

	return chan;
}

/*
 * Runtime suspend: save the global command register and, for every active
 * channel, its full register state, then gate the AHUB clock. Channels
 * masked out in dma_chan_mask never had tdc->tdma set and are skipped.
 */
static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int i;

	if (tdma->base_addr)
		tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);

	if (!tdma->global_cmd)
		goto clk_disable;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;

		ch_reg = &tdc->ch_regs;
		ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
		/* skip if channel is not active */
		if (!ch_reg->cmd)
			continue;
		ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
		ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
						tdma->cdata->ch_tc_offset_diff);
		ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
						tdma->cdata->ch_tc_offset_diff);
		ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);

		if (tdc->global_ch_config_offset)
			ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);

		/* FIFO ctrl lives either in the channel page or global space */
		if (!tdc->tdma->cdata->global_ch_fifo_base)
			ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
		else if (tdc->global_ch_fifo_offset)
			ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);

		ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);

	}

clk_disable:
	clk_disable_unprepare(tdma->ahub_clk);

	return 0;
}

/*
 * Runtime resume: ungate the AHUB clock, restore the global state (command
 * register and page routing) and re-program every channel that was active
 * at suspend time, writing CMD last so the channel restarts only after all
 * of its registers are back in place.
 */
static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int ret, i;

	ret = clk_prepare_enable(tdma->ahub_clk);
	if (ret) {
		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
		return ret;
	}
	if (tdma->base_addr) {
		tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
		if (tdma->cdata->set_global_pg_config)
			tdma->cdata->set_global_pg_config(tdma);
	}

	if (!tdma->global_cmd)
		return 0;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;
		ch_reg = &tdc->ch_regs;
		/* skip if channel was not active earlier */
		if (!ch_reg->cmd)
			continue;
		tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
		tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
			      ch_reg->src_addr);
		tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
			      ch_reg->trg_addr);
		tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);

		if (!tdc->tdma->cdata->global_ch_fifo_base)
			tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
		else if (tdc->global_ch_fifo_offset)
			tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);

		if (tdc->global_ch_config_offset)
			tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);

		tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);

		tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
	}

	return 0;
}

/* Tegra210: channel registers inside the APE aperture at offset 0xc00 */
static const struct tegra_adma_chip_data tegra210_chip_data = {
	.adma_get_burst_config  = tegra210_adma_get_burst_config,
	.global_reg_offset	= 0xc00,
	.global_int_clear	= 0x20,
	.global_ch_fifo_base	= 0,
	.global_ch_config_base	= 0,
	.ch_req_tx_shift	= 28,
	.ch_req_rx_shift	= 24,
	.ch_dir_shift		= 12,
	.ch_mode_shift		= 8,
	.ch_base_offset		= 0,
	.ch_tc_offset_diff	= 0,
	.ch_config		= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
	.ch_req_mask		= 0xf,
	.ch_dir_mask		= 0xf,
	.ch_req_max		= 10,
	.ch_reg_size		= 0x80,
	.nr_channels		= 22,
	.ch_fifo_size_mask	= 0xf,
	.sreq_index_offset	= 2,
	.max_page		= 0,
	.set_global_pg_config	= NULL,
};

/* Tegra186: separate global aperture, channel pages at 0x10000 strides */
static const struct tegra_adma_chip_data tegra186_chip_data = {
	.adma_get_burst_config  = tegra186_adma_get_burst_config,
	.global_reg_offset	= 0,
	.global_int_clear	= 0x402c,
	.global_ch_fifo_base	= 0,
	.global_ch_config_base	= 0,
	.ch_req_tx_shift	= 27,
	.ch_req_rx_shift	= 22,
	.ch_dir_shift		= 12,
	.ch_mode_shift		= 8,
	.ch_base_offset		= 0x10000,
	.ch_tc_offset_diff	= 0,
	.ch_config		= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
				  TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
	.ch_req_mask		= 0x1f,
	.ch_dir_mask		= 0xf,
	.ch_req_max		= 20,
	.ch_reg_size		= 0x100,
	.nr_channels		= 32,
	.ch_fifo_size_mask	= 0x1f,
	.sreq_index_offset	= 4,
	.max_page		= 4,
	.set_global_pg_config	= tegra186_adma_global_page_config,
};

/* Tegra264: FIFO/WRR config moved to global registers, shifted TC offsets */
static const struct tegra_adma_chip_data tegra264_chip_data = {
	.adma_get_burst_config  = tegra186_adma_get_burst_config,
	.global_reg_offset	= 0,
	.global_int_clear	= 0x800c,
	.global_ch_fifo_base	= ADMA_GLOBAL_CH_FIFO_CTRL,
	.global_ch_config_base	= ADMA_GLOBAL_CH_CONFIG,
	.ch_req_tx_shift	= 26,
	.ch_req_rx_shift	= 20,
	.ch_dir_shift		= 10,
	.ch_mode_shift		= 7,
	.ch_base_offset		= 0x10000,
	.ch_tc_offset_diff	= 4,
	.ch_config		= ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
				  ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
	.ch_req_mask		= 0x3f,
	.ch_dir_mask		= 7,
	.ch_req_max		= 32,
	.ch_reg_size		= 0x100,
	.nr_channels		= 64,
	.ch_fifo_size_mask	= 0x7f,
	.sreq_index_offset	= 0,
	.max_page		= 10,
	.set_global_pg_config	= tegra264_adma_global_page_config,
};

static const struct of_device_id tegra_adma_of_match[] = {
	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
	{
.compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data }, 1018 { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data }, 1019 { }, 1020 }; 1021 MODULE_DEVICE_TABLE(of, tegra_adma_of_match); 1022 1023 static int tegra_adma_probe(struct platform_device *pdev) 1024 { 1025 const struct tegra_adma_chip_data *cdata; 1026 struct tegra_adma *tdma; 1027 struct resource *res_page, *res_base; 1028 int ret, i; 1029 1030 cdata = of_device_get_match_data(&pdev->dev); 1031 if (!cdata) { 1032 dev_err(&pdev->dev, "device match data not found\n"); 1033 return -ENODEV; 1034 } 1035 1036 tdma = devm_kzalloc(&pdev->dev, 1037 struct_size(tdma, channels, cdata->nr_channels), 1038 GFP_KERNEL); 1039 if (!tdma) 1040 return -ENOMEM; 1041 1042 tdma->dev = &pdev->dev; 1043 tdma->cdata = cdata; 1044 tdma->nr_channels = cdata->nr_channels; 1045 platform_set_drvdata(pdev, tdma); 1046 1047 res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page"); 1048 if (res_page) { 1049 tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page); 1050 if (IS_ERR(tdma->ch_base_addr)) 1051 return PTR_ERR(tdma->ch_base_addr); 1052 1053 res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global"); 1054 if (res_base) { 1055 resource_size_t page_offset, page_no; 1056 unsigned int ch_base_offset; 1057 1058 if (res_page->start < res_base->start) 1059 return -EINVAL; 1060 page_offset = res_page->start - res_base->start; 1061 ch_base_offset = cdata->ch_base_offset; 1062 if (!ch_base_offset) 1063 return -EINVAL; 1064 1065 page_no = div_u64(page_offset, ch_base_offset); 1066 if (!page_no || page_no > INT_MAX) 1067 return -EINVAL; 1068 1069 tdma->ch_page_no = page_no - 1; 1070 tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base); 1071 if (IS_ERR(tdma->base_addr)) 1072 return PTR_ERR(tdma->base_addr); 1073 } 1074 } else { 1075 /* If no 'page' property found, then reg DT binding would be legacy */ 1076 res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1077 
		if (res_base) {
			tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
			if (IS_ERR(tdma->base_addr))
				return PTR_ERR(tdma->base_addr);
		} else {
			return -ENODEV;
		}

		/* Legacy binding: channel registers at a fixed chip-specific offset */
		tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
	}

	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(tdma->ahub_clk)) {
		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
		return PTR_ERR(tdma->ahub_clk);
	}

	tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
					   BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!tdma->dma_chan_mask)
		return -ENOMEM;

	/* Enable all channels by default */
	bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);

	/*
	 * Optional DT override of usable channels.  -EINVAL means the
	 * property is simply absent, which is fine; any other error is fatal.
	 * NOTE(review): the u32 array is copied straight into the unsigned
	 * long bitmap — assumes little-endian word layout; confirm for BE.
	 */
	ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
					 (u32 *)tdma->dma_chan_mask,
					 BITS_TO_U32(tdma->nr_channels));
	if (ret < 0 && (ret != -EINVAL)) {
		dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
		return ret;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		/* skip for reserved channels */
		if (!test_bit(i, tdma->dma_chan_mask))
			continue;

		tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);

		/*
		 * Chips with global per-channel FIFO/config registers
		 * (Tegra264) get one 32-bit register per channel.
		 */
		if (tdma->base_addr) {
			if (cdata->global_ch_fifo_base)
				tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);

			if (cdata->global_ch_config_base)
				tdc->global_ch_config_offset =
					cdata->global_ch_config_base + (4 * i);
		}

		/* One interrupt per channel; 0 from of_irq_get means "no mapping" */
		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq <= 0) {
			ret = tdc->irq ?: -ENXIO;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	pm_runtime_enable(&pdev->dev);

	ret =
pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	/* Hardware must be powered for the initial controller setup */
	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	/* Fill in the dmaengine provider callbacks and capabilities */
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_pause = tegra_adma_pause;
	tdma->dma_dev.device_resume = tegra_adma_resume;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto rpm_put;
	}

	/* Expose channels to DT consumers via the #dma-cells translation */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	/* Drop the probe-time reference; channels resume the device on demand */
	pm_runtime_put(&pdev->dev);

	/* NOTE(review): message says "Tegra210" for all supported SoCs */
	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

/* Error unwind in reverse order of acquisition */
dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	/*
	 * Dispose every IRQ mapped so far.  When entered via the labels above,
	 * i == nr_channels so all channels are covered; reserved (skipped)
	 * channels have irq == 0, for which irq_dispose_mapping() is a no-op.
	 */
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);

	return ret;
}

/* Tear down in reverse order of probe: OF xlate, dmaengine, IRQs, PM. */
static void tegra_adma_remove(struct platform_device *pdev)
{
	struct tegra_adma *tdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->nr_channels; ++i) {
		/* reserved channels never had an IRQ mapped (irq == 0) */
		if (tdma->channels[i].irq)
			irq_dispose_mapping(tdma->channels[i].irq);
	}

	pm_runtime_disable(&pdev->dev);
}

static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
			   tegra_adma_runtime_resume, NULL)
	/* System sleep is routed through the runtime-PM callbacks */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

static struct platform_driver tegra_admac_driver = {
	.driver = {
		.name = "tegra-adma",
		.pm = &tegra_adma_dev_pm_ops,
		.of_match_table = tegra_adma_of_match,
	},
	.probe = tegra_adma_probe,
	.remove = tegra_adma_remove,
};

module_platform_driver(tegra_admac_driver);

MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
MODULE_LICENSE("GPL v2");