// SPDX-License-Identifier: GPL-2.0-only
/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define ADMA_CH_CMD					0x00
#define ADMA_CH_STATUS					0x0c
#define ADMA_CH_STATUS_XFER_EN				BIT(0)
#define ADMA_CH_STATUS_XFER_PAUSED			BIT(1)

#define ADMA_CH_INT_STATUS				0x10
#define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)

#define ADMA_CH_INT_CLEAR				0x1c
#define ADMA_CH_CTRL					0x24
#define ADMA_CH_CTRL_DIR(val)				(((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM			2
#define ADMA_CH_CTRL_DIR_MEM2AHUB			4
#define ADMA_CH_CTRL_MODE_CONTINUOUS			(2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT			0

#define ADMA_CH_CONFIG					0x28
#define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT			20
#define ADMA_CH_CONFIG_MAX_BURST_SIZE			16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS				8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs)	((reqs) << 4)

#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP			0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ		0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ		0x84

#define ADMA_CH_FIFO_CTRL				0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT			8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT			0

#define ADMA_CH_LOWER_SRC_ADDR				0x34
#define ADMA_CH_LOWER_TRG_ADDR				0x3c
#define ADMA_CH_TC					0x44
#define ADMA_CH_TC_COUNT_MASK				0x3ffffffc

#define ADMA_CH_XFER_STATUS				0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff

#define ADMA_GLOBAL_CMD					0x00
#define ADMA_GLOBAL_SOFT_RESET				0x04

#define TEGRA_ADMA_BURST_COMPLETE_TIME			20

#define ADMA_CH_REG_FIELD_VAL(val, mask, shift)		(((val) & (mask)) << (shift))

struct tegra_adma;
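
/*
 * Illustrative sketch (not part of the driver logic): the helpers above
 * compose register values by masking and shifting. For example, a Tegra210
 * memory-to-AHUB control word for TX request index 2 could be built as:
 *
 *	u32 ctrl = ADMA_CH_CTRL_DIR(ADMA_CH_CTRL_DIR_MEM2AHUB) |
 *		   ADMA_CH_CTRL_MODE_CONTINUOUS |
 *		   ADMA_CH_CTRL_FLOWCTRL_EN |
 *		   ADMA_CH_REG_FIELD_VAL(2, 0xf, 28);
 *
 * The request index (2) is an example value; the mask (0xf) and shift (28)
 * are the Tegra210 values from tegra210_chip_data below.
 */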

/*
 * struct tegra_adma_chip_data - Tegra chip specific data
 * @adma_get_burst_config: Function callback used to set DMA burst size.
 * @global_reg_offset: Register offset of DMA global register.
 * @global_int_clear: Register offset of DMA global interrupt clear.
 * @ch_req_tx_shift: Register bit shift for AHUB transmit channel select.
 * @ch_req_rx_shift: Register bit shift for AHUB receive channel select.
 * @ch_base_offset: Register offset of DMA channel registers.
 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
 * @ch_req_mask: Mask for Tx or Rx channel select.
 * @ch_req_max: Maximum number of Tx or Rx channels available.
 * @ch_reg_size: Size of DMA channel register space.
 * @nr_channels: Number of DMA channels available.
 * @ch_fifo_size_mask: Mask for FIFO size field.
 * @sreq_index_offset: Slave channel index offset.
 * @max_page: Maximum ADMA Channel Page.
 * @has_outstanding_reqs: If DMA channel can have outstanding requests.
 * @set_global_pg_config: Global page programming.
 */
struct tegra_adma_chip_data {
	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
	unsigned int global_reg_offset;
	unsigned int global_int_clear;
	unsigned int ch_req_tx_shift;
	unsigned int ch_req_rx_shift;
	unsigned int ch_base_offset;
	unsigned int ch_fifo_ctrl;
	unsigned int ch_req_mask;
	unsigned int ch_req_max;
	unsigned int ch_reg_size;
	unsigned int nr_channels;
	unsigned int ch_fifo_size_mask;
	unsigned int sreq_index_offset;
	unsigned int max_page;
	bool has_outstanding_reqs;
	void (*set_global_pg_config)(struct tegra_adma *tdma);
};

/*
 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
 */
struct tegra_adma_chan_regs {
	unsigned int ctrl;
	unsigned int config;
	unsigned int src_addr;
	unsigned int trg_addr;
	unsigned int fifo_ctrl;
	unsigned int cmd;
	unsigned int tc;
};

/*
 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
 */
struct tegra_adma_desc {
	struct virt_dma_desc vd;
	struct tegra_adma_chan_regs ch_regs;
	size_t buf_len;
	size_t period_len;
	size_t num_periods;
};

/*
 * struct tegra_adma_chan - Tegra ADMA channel information
 */
struct tegra_adma_chan {
	struct virt_dma_chan vc;
	struct tegra_adma_desc *desc;
	struct tegra_adma *tdma;
	int irq;
	void __iomem *chan_addr;

	/* Slave channel configuration info */
	struct dma_slave_config sconfig;
	enum dma_transfer_direction sreq_dir;
	unsigned int sreq_index;
	bool sreq_reserved;
	struct tegra_adma_chan_regs ch_regs;

	/* Transfer count and position info */
	unsigned int tx_buf_count;
	unsigned int tx_buf_pos;
};

/*
 * struct tegra_adma - Tegra ADMA controller information
 */
struct tegra_adma {
	struct dma_device dma_dev;
	struct device *dev;
	void __iomem *base_addr;
	void __iomem *ch_base_addr;
	struct clk *ahub_clk;
	unsigned int nr_channels;
	unsigned long *dma_chan_mask;
	unsigned long rx_requests_reserved;
	unsigned long tx_requests_reserved;

	/* Used to store global command register state when suspending */
	unsigned int global_cmd;
	unsigned int ch_page_no;

	const struct tegra_adma_chip_data *cdata;

	/* Last member of the structure */
	struct tegra_adma_chan channels[] __counted_by(nr_channels);
};

static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
	return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_adma_chan, vc.chan);
}

static inline struct tegra_adma_desc *to_tegra_adma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_adma_desc, vd.tx);
}
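
/*
 * Address-map summary implied by the accessors above (descriptive only, not
 * an authoritative layout): controller-global registers are accessed at
 * base_addr + global_reg_offset, channel-page global registers at
 * ch_base_addr + global_reg_offset, and channel i's registers at
 * chan_addr == ch_base_addr + i * ch_reg_size (set up in tegra_adma_probe()).
 * On Tegra210, for example, channel 3's CTRL register sits at
 * ch_base_addr + 3 * 0x80 + 0x24.
 */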

static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
{
	return tdc->tdma->dev;
}

static void tegra_adma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_adma_desc, vd));
}

static int tegra_adma_slave_config(struct dma_chan *dc,
				   struct dma_slave_config *sconfig)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));

	return 0;
}

static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
{
	/*
	 * Clear the default page1 channel group configs and program
	 * the global registers based on the actual page usage
	 */
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}

static int tegra_adma_init(struct tegra_adma *tdma)
{
	u32 status;
	int ret;

	/* Clear any channel group global interrupts */
	tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);

	if (!tdma->base_addr)
		return 0;

	/* Assert soft reset */
	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

	/* Wait for reset to clear */
	ret = readx_poll_timeout(readl,
				 tdma->base_addr +
				 tdma->cdata->global_reg_offset +
				 ADMA_GLOBAL_SOFT_RESET,
				 status, status == 0, 20, 10000);
	if (ret)
		return ret;

	if (tdma->cdata->set_global_pg_config)
		tdma->cdata->set_global_pg_config(tdma);

	/* Enable global ADMA registers */
	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);

	return 0;
}
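
/*
 * The TX and RX request lines are tracked in two bitmaps
 * (tx_requests_reserved/rx_requests_reserved) so that a given ADMAIF
 * request index can be claimed by at most one channel per direction at a
 * time. A rough sketch of the claim/release pairing implemented below:
 *
 *	test_and_set_bit(sreq_index, &tdma->tx_requests_reserved); claim
 *	clear_bit(sreq_index, &tdma->tx_requests_reserved);        release
 */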

static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	if (sreq_index > tdma->cdata->ch_req_max) {
		dev_err(tdma->dev, "invalid DMA request\n");
		return -EINVAL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}

static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}

static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
{
	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);

	return status & ADMA_CH_INT_STATUS_XFER_DONE;
}

static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
{
	u32 status = tegra_adma_irq_status(tdc);

	if (status)
		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);

	return status;
}

static void tegra_adma_stop(struct tegra_adma_chan *tdc)
{
	unsigned int status;

	/* Disable ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 0);

	/* Clear interrupt status */
	tegra_adma_irq_clear(tdc);

	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
				      status, !(status & ADMA_CH_STATUS_XFER_EN),
				      20, 10000)) {
		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
		return;
	}

	kfree(tdc->desc);
	tdc->desc = NULL;
}

static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
	struct tegra_adma_chan_regs *ch_regs;
	struct tegra_adma_desc *desc;

	if (!vd)
		return;

	list_del(&vd->node);

	desc = to_tegra_adma_desc(&vd->tx);

	if (!desc) {
		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
		return;
	}

	ch_regs = &desc->ch_regs;

	tdc->tx_buf_pos = 0;
	tdc->tx_buf_count = 0;
	tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
	tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);

	/* Start ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 1);

	tdc->desc = desc;
}
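
/*
 * Worked example for the residue math below (illustrative numbers only):
 * ADMA_CH_XFER_STATUS counts completed periods and wraps at
 * ADMA_CH_XFER_STATUS_COUNT_MASK + 1 (0x10000). With num_periods = 4,
 * period_len = 0x1000 and buf_len = 0x4000, an accumulated count of 6
 * means 6 % 4 = 2 periods consumed in the current pass through the
 * buffer, so the residue reported is 0x4000 - 2 * 0x1000 = 0x2000.
 */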

static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
	struct tegra_adma_desc *desc = tdc->desc;
	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
	unsigned int periods_remaining;

	/*
	 * Handle wrap around of buffer count register
	 */
	if (pos < tdc->tx_buf_pos)
		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
	else
		tdc->tx_buf_count += pos - tdc->tx_buf_pos;

	periods_remaining = tdc->tx_buf_count % desc->num_periods;
	tdc->tx_buf_pos = pos;

	return desc->buf_len - (periods_remaining * desc->period_len);
}

static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;

	spin_lock(&tdc->vc.lock);

	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock(&tdc->vc.lock);
		return IRQ_NONE;
	}

	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}

static void tegra_adma_issue_pending(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (vchan_issue_pending(&tdc->vc)) {
		if (!tdc->desc)
			tegra_adma_start(tdc);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
{
	u32 csts;

	csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
	csts &= ADMA_CH_STATUS_XFER_PAUSED;

	return csts ? true : false;
}
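
/*
 * Pausing below is a polled handshake: after setting the pause bit the
 * driver re-checks ADMA_CH_STATUS up to 10 times, sleeping
 * TEGRA_ADMA_BURST_COMPLETE_TIME (20 us) between checks, i.e. it waits
 * roughly 200 us at most for the in-flight burst to drain before giving
 * up with -EBUSY.
 */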

static int tegra_adma_pause(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	int dcnt = 10;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	while (dcnt-- && !tegra_adma_is_paused(tdc))
		udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);

	if (dcnt < 0) {
		dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
		return -EBUSY;
	}

	return 0;
}

static int tegra_adma_resume(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	return 0;
}

static int tegra_adma_terminate_all(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->desc)
		tegra_adma_stop(tdc);

	tegra_adma_request_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);
	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		desc = to_tegra_adma_desc(&vd->tx);
		residual = desc->ch_regs.tc;
	} else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
		residual = tegra_adma_get_residue(tdc);
	} else {
		residual = 0;
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	dma_set_residue(txstate, residual);

	return ret;
}

static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}

static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}
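
/*
 * The two burst encodings above differ: Tegra210 stores a log2-style
 * value (fls(burst_size)), while Tegra186 stores burst_size - 1. For
 * example, a requested burst of 8 words becomes fls(8) = 4 in the
 * Tegra210 CONFIG field but 7 in the Tegra186 field; both land at
 * ADMA_CH_CONFIG_BURST_SIZE_SHIFT (bit 20).
 */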

static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
				      struct tegra_adma_desc *desc,
				      dma_addr_t buf_addr,
				      enum dma_transfer_direction direction)
{
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
	unsigned int burst_size, adma_dir, fifo_size_shift;

	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
		return -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT;
		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
		burst_size = tdc->sconfig.dst_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_tx_shift);
		ch_regs->src_addr = buf_addr;
		break;

	case DMA_DEV_TO_MEM:
		fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT;
		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
		burst_size = tdc->sconfig.src_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_rx_shift);
		ch_regs->trg_addr = buf_addr;
		break;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
			 ADMA_CH_CTRL_MODE_CONTINUOUS |
			 ADMA_CH_CTRL_FLOWCTRL_EN;
	ch_regs->config |= cdata->adma_get_burst_config(burst_size);
	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
	if (cdata->has_outstanding_reqs)
		ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);

	/*
	 * 'sreq_index' represents the current ADMAIF channel number and as per
	 * HW recommendation its FIFO size should match with the corresponding
	 * ADMA channel.
	 *
	 * ADMA FIFO size is set as per below (based on default ADMAIF channel
	 * FIFO sizes):
	 *	fifo_size = 0x2 (sreq_index > sreq_index_offset)
	 *	fifo_size = 0x3 (sreq_index <= sreq_index_offset)
	 */
	if (tdc->sreq_index > cdata->sreq_index_offset)
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask,
					      fifo_size_shift);
	else
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
					      fifo_size_shift);

	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;

	return tegra_adma_request_alloc(tdc, direction);
}

static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}

static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_resume_and_get(tdc2dev(tdc));
	if (ret < 0) {
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}

static void tegra_adma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	tegra_adma_terminate_all(dc);
	vchan_free_chan_resources(&tdc->vc);
	tasklet_kill(&tdc->vc.task);
	free_irq(tdc->irq, tdc);
	pm_runtime_put(tdc2dev(tdc));

	tdc->sreq_index = 0;
	tdc->sreq_dir = DMA_TRANS_NONE;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_adma *tdma = ofdma->of_dma_data;
	struct tegra_adma_chan *tdc;
	struct dma_chan *chan;
	unsigned int sreq_index;

	if (dma_spec->args_count != 1)
		return NULL;

	sreq_index = dma_spec->args[0];

	if (sreq_index == 0) {
		dev_err(tdma->dev, "DMA request must not be 0\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_adma_chan(chan);
	tdc->sreq_index = sreq_index;

	return chan;
}
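
/*
 * Hedged usage sketch for a dmaengine client of this controller (not part
 * of the driver; the channel name "rx" and admaif_fifo_addr are assumed
 * values for illustration):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr	= admaif_fifo_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The single DT cell parsed by tegra_dma_of_xlate() above carries the
 * non-zero ADMAIF request index for the channel.
 */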

static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int i;

	if (tdma->base_addr)
		tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);

	if (!tdma->global_cmd)
		goto clk_disable;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;

		ch_reg = &tdc->ch_regs;
		ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
		/* skip if channel is not active */
		if (!ch_reg->cmd)
			continue;
		ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
		ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
		ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
		ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
		ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
		ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
	}

clk_disable:
	clk_disable_unprepare(tdma->ahub_clk);

	return 0;
}

static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int ret, i;

	ret = clk_prepare_enable(tdma->ahub_clk);
	if (ret) {
		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
		return ret;
	}

	if (tdma->base_addr) {
		tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
		if (tdma->cdata->set_global_pg_config)
			tdma->cdata->set_global_pg_config(tdma);
	}

	if (!tdma->global_cmd)
		return 0;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;
		ch_reg = &tdc->ch_regs;
		/* skip if channel was not active earlier */
		if (!ch_reg->cmd)
			continue;
		tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
		tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
		tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
		tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
		tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
		tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
		tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
	}

	return 0;
}

static const struct tegra_adma_chip_data tegra210_chip_data = {
	.adma_get_burst_config	= tegra210_adma_get_burst_config,
	.global_reg_offset	= 0xc00,
	.global_int_clear	= 0x20,
	.ch_req_tx_shift	= 28,
	.ch_req_rx_shift	= 24,
	.ch_base_offset		= 0,
	.ch_req_mask		= 0xf,
	.ch_req_max		= 10,
	.ch_reg_size		= 0x80,
	.nr_channels		= 22,
	.ch_fifo_size_mask	= 0xf,
	.sreq_index_offset	= 2,
	.max_page		= 0,
	.has_outstanding_reqs	= false,
	.set_global_pg_config	= NULL,
};

static const struct tegra_adma_chip_data tegra186_chip_data = {
	.adma_get_burst_config	= tegra186_adma_get_burst_config,
	.global_reg_offset	= 0,
	.global_int_clear	= 0x402c,
	.ch_req_tx_shift	= 27,
	.ch_req_rx_shift	= 22,
	.ch_base_offset		= 0x10000,
	.ch_req_mask		= 0x1f,
	.ch_req_max		= 20,
	.ch_reg_size		= 0x100,
	.nr_channels		= 32,
	.ch_fifo_size_mask	= 0x1f,
	.sreq_index_offset	= 4,
	.max_page		= 4,
	.has_outstanding_reqs	= true,
	.set_global_pg_config	= tegra186_adma_global_page_config,
};

static const struct of_device_id tegra_adma_of_match[] = {
	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
	{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
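
/*
 * Note on the chip data above: on Tegra186 the channel pages are spaced
 * cdata->ch_base_offset (0x10000) apart, so when the DT supplies separate
 * "global" and "page" regions, tegra_adma_probe() below derives the page
 * number as page_offset / 0x10000. A "page" region starting 0x20000 past
 * the global base, for instance, yields page_no = 2 (stored as
 * ch_page_no = 1).
 */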

static int tegra_adma_probe(struct platform_device *pdev)
{
	const struct tegra_adma_chip_data *cdata;
	struct tegra_adma *tdma;
	struct resource *res_page, *res_base;
	int ret, i;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "device match data not found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->cdata = cdata;
	tdma->nr_channels = cdata->nr_channels;
	platform_set_drvdata(pdev, tdma);

	res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
	if (res_page) {
		tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
		if (IS_ERR(tdma->ch_base_addr))
			return PTR_ERR(tdma->ch_base_addr);

		res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
		if (res_base) {
			resource_size_t page_offset, page_no;
			unsigned int ch_base_offset;

			if (res_page->start < res_base->start)
				return -EINVAL;
			page_offset = res_page->start - res_base->start;
			ch_base_offset = cdata->ch_base_offset;
			if (!ch_base_offset)
				return -EINVAL;

			page_no = div_u64(page_offset, ch_base_offset);
			if (!page_no || page_no > INT_MAX)
				return -EINVAL;

			tdma->ch_page_no = page_no - 1;
			tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
			if (IS_ERR(tdma->base_addr))
				return PTR_ERR(tdma->base_addr);
		}
	} else {
		/* If no 'page' region is found, the 'reg' DT binding is the legacy one */
		res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (res_base) {
			tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
			if (IS_ERR(tdma->base_addr))
				return PTR_ERR(tdma->base_addr);
		} else {
			return -ENODEV;
		}

		tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
	}

	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(tdma->ahub_clk)) {
		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
		return PTR_ERR(tdma->ahub_clk);
	}

	tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
					   BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!tdma->dma_chan_mask)
		return -ENOMEM;

	/* Enable all channels by default */
	bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);

	ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
					 (u32 *)tdma->dma_chan_mask,
					 BITS_TO_U32(tdma->nr_channels));
	if (ret < 0 && (ret != -EINVAL)) {
		dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
		return ret;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		/* skip for reserved channels */
		if (!test_bit(i, tdma->dma_chan_mask))
			continue;

		tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);

		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq <= 0) {
			ret = tdc->irq ?: -ENXIO;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;
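
	/*
	 * Only cyclic transfers are implemented (device_prep_dma_cyclic);
	 * the capabilities advertised below reflect that, along with the
	 * 4-byte bus width and both device directions.
	 */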
	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_pause = tegra_adma_pause;
	tdma->dma_dev.device_resume = tegra_adma_resume;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto rpm_put;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);

	return ret;
}

static void tegra_adma_remove(struct platform_device *pdev)
{
	struct tegra_adma *tdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->nr_channels; ++i) {
		if (tdma->channels[i].irq)
			irq_dispose_mapping(tdma->channels[i].irq);
	}

	pm_runtime_disable(&pdev->dev);
}

static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
			   tegra_adma_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

static struct platform_driver tegra_admac_driver = {
	.driver = {
		.name	= "tegra-adma",
		.pm	= &tegra_adma_dev_pm_ops,
		.of_match_table = tegra_adma_of_match,
	},
	.probe		= tegra_adma_probe,
	.remove		= tegra_adma_remove,
};

module_platform_driver(tegra_admac_driver);

MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
MODULE_LICENSE("GPL v2");