// SPDX-License-Identifier: GPL-2.0-only
/*
 * ADMA driver for Nvidia's Tegra210 ADMA controller.
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define ADMA_CH_CMD					0x00
#define ADMA_CH_STATUS					0x0c
#define ADMA_CH_STATUS_XFER_EN				BIT(0)
#define ADMA_CH_STATUS_XFER_PAUSED			BIT(1)

#define ADMA_CH_INT_STATUS				0x10
#define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)

#define ADMA_CH_INT_CLEAR				0x1c
#define ADMA_CH_CTRL					0x24
#define ADMA_CH_CTRL_DIR(val)				(((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM			2
#define ADMA_CH_CTRL_DIR_MEM2AHUB			4
#define ADMA_CH_CTRL_MODE_CONTINUOUS			(2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT			0

#define ADMA_CH_CONFIG					0x28
#define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT			20
#define ADMA_CH_CONFIG_MAX_BURST_SIZE			16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS				8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs)	((reqs) << 4)

#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP			0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ		0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ		0x84

#define ADMA_CH_FIFO_CTRL				0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT			8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT			0

#define ADMA_CH_LOWER_SRC_ADDR				0x34
#define ADMA_CH_LOWER_TRG_ADDR				0x3c
#define ADMA_CH_TC					0x44
#define ADMA_CH_TC_COUNT_MASK				0x3ffffffc

#define ADMA_CH_XFER_STATUS				0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff

#define ADMA_GLOBAL_CMD					0x00
#define ADMA_GLOBAL_SOFT_RESET				0x04

#define TEGRA_ADMA_BURST_COMPLETE_TIME			20

#define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & (mask)) << (shift))
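/*
 * For reference, ADMA_CH_REG_FIELD_VAL(2, 0xf, 8) expands to
 * ((2 & 0xf) << 8) == 0x200, i.e. the value 2 placed in a register
 * field occupying bits [11:8].
 */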
struct tegra_adma;

/*
 * struct tegra_adma_chip_data - Tegra chip specific data
 * @adma_get_burst_config: Function callback used to set DMA burst size.
 * @global_reg_offset: Register offset of DMA global register.
 * @global_int_clear: Register offset of DMA global interrupt clear.
 * @ch_req_tx_shift: Register field shift for AHUB transmit channel select.
 * @ch_req_rx_shift: Register field shift for AHUB receive channel select.
 * @ch_base_offset: Register offset of DMA channel registers.
 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
 * @ch_req_mask: Mask for Tx or Rx channel select.
 * @ch_req_max: Maximum number of Tx or Rx channels available.
 * @ch_reg_size: Size of DMA channel register space.
 * @nr_channels: Number of DMA channels available.
 * @ch_fifo_size_mask: Mask for FIFO size field.
 * @sreq_index_offset: Slave channel index offset.
 * @has_outstanding_reqs: If DMA channel can have outstanding requests.
 * @set_global_pg_config: Callback to program the global page registers.
 */
struct tegra_adma_chip_data {
	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
	unsigned int global_reg_offset;
	unsigned int global_int_clear;
	unsigned int ch_req_tx_shift;
	unsigned int ch_req_rx_shift;
	unsigned int ch_base_offset;
	unsigned int ch_fifo_ctrl;
	unsigned int ch_req_mask;
	unsigned int ch_req_max;
	unsigned int ch_reg_size;
	unsigned int nr_channels;
	unsigned int ch_fifo_size_mask;
	unsigned int sreq_index_offset;
	bool has_outstanding_reqs;
	void (*set_global_pg_config)(struct tegra_adma *tdma);
};

/*
 * struct tegra_adma_chan_regs - Tegra ADMA channel registers
 */
struct tegra_adma_chan_regs {
	unsigned int ctrl;
	unsigned int config;
	unsigned int src_addr;
	unsigned int trg_addr;
	unsigned int fifo_ctrl;
	unsigned int cmd;
	unsigned int tc;
};

/*
 * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
 */
struct tegra_adma_desc {
	struct virt_dma_desc vd;
	struct tegra_adma_chan_regs ch_regs;
	size_t buf_len;
	size_t period_len;
	size_t num_periods;
};

/*
 * struct tegra_adma_chan - Tegra ADMA channel information
 */
struct tegra_adma_chan {
	struct virt_dma_chan vc;
	struct tegra_adma_desc *desc;
	struct tegra_adma *tdma;
	int irq;
	void __iomem *chan_addr;

	/* Slave channel configuration info */
	struct dma_slave_config sconfig;
	enum dma_transfer_direction sreq_dir;
	unsigned int sreq_index;
	bool sreq_reserved;
	struct tegra_adma_chan_regs ch_regs;

	/* Transfer count and position info */
	unsigned int tx_buf_count;
	unsigned int tx_buf_pos;
};

/*
 * struct tegra_adma - Tegra ADMA controller information
 */
struct tegra_adma {
	struct dma_device dma_dev;
	struct device *dev;
	void __iomem *base_addr;
	void __iomem *ch_base_addr;
	struct clk *ahub_clk;
	unsigned int nr_channels;
	unsigned long *dma_chan_mask;
	unsigned long rx_requests_reserved;
	unsigned long tx_requests_reserved;

	/* Used to store global command register state when suspending */
	unsigned int global_cmd;
	unsigned int ch_page_no;

	const struct tegra_adma_chip_data *cdata;

	/* Last member of the structure */
	struct tegra_adma_chan channels[] __counted_by(nr_channels);
};

static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
	return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
}

static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_adma_chan, vc.chan);
}

static inline struct tegra_adma_desc *to_tegra_adma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_adma_desc, vd.tx);
}

static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
{
	return tdc->tdma->dev;
}
static void tegra_adma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct tegra_adma_desc, vd));
}

static int tegra_adma_slave_config(struct dma_chan *dc,
				   struct dma_slave_config *sconfig)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));

	return 0;
}

static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
{
	/*
	 * Clear the default page1 channel group configs and program
	 * the global registers based on the actual page usage
	 */
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
	tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}
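/*
 * Worked example (values taken from the code above): when the "page"
 * region selects page 2, probe stores ch_page_no = 1, so the non-zero
 * writes land at CHGRP + 0x4, RX_REQ + 0x4 and TX_REQ + 0x4, enabling
 * all channel groups and RX/TX requests for that page only.
 */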
static int tegra_adma_init(struct tegra_adma *tdma)
{
	u32 status;
	int ret;

	/* Clear any channel group global interrupts */
	tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);

	if (!tdma->base_addr)
		return 0;

	/* Assert soft reset */
	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

	/* Wait for reset to clear */
	ret = readx_poll_timeout(readl,
				 tdma->base_addr +
				 tdma->cdata->global_reg_offset +
				 ADMA_GLOBAL_SOFT_RESET,
				 status, status == 0, 20, 10000);
	if (ret)
		return ret;

	if (tdma->cdata->set_global_pg_config)
		tdma->cdata->set_global_pg_config(tdma);

	/* Enable global ADMA registers */
	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);

	return 0;
}

static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	if (sreq_index > tdma->cdata->ch_req_max) {
		dev_err(tdma->dev, "invalid DMA request\n");
		return -EINVAL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}

static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}

static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
{
	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);

	return status & ADMA_CH_INT_STATUS_XFER_DONE;
}

static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
{
	u32 status = tegra_adma_irq_status(tdc);

	if (status)
		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);

	return status;
}

static void tegra_adma_stop(struct tegra_adma_chan *tdc)
{
	unsigned int status;

	/* Disable ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 0);

	/* Clear interrupt status */
	tegra_adma_irq_clear(tdc);

	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
				      status, !(status & ADMA_CH_STATUS_XFER_EN),
				      20, 10000)) {
		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
		return;
	}

	kfree(tdc->desc);
	tdc->desc = NULL;
}

static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
	struct tegra_adma_chan_regs *ch_regs;
	struct tegra_adma_desc *desc;

	if (!vd)
		return;

	list_del(&vd->node);

	desc = to_tegra_adma_desc(&vd->tx);

	if (!desc) {
		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
		return;
	}

	ch_regs = &desc->ch_regs;

	tdc->tx_buf_pos = 0;
	tdc->tx_buf_count = 0;
	tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
	tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);

	/* Start ADMA */
	tdma_ch_write(tdc, ADMA_CH_CMD, 1);

	tdc->desc = desc;
}

static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
	struct tegra_adma_desc *desc = tdc->desc;
	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
	unsigned int periods_remaining;

	/* Handle wrap around of buffer count register */
	if (pos < tdc->tx_buf_pos)
		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
	else
		tdc->tx_buf_count += pos - tdc->tx_buf_pos;

	periods_remaining = tdc->tx_buf_count % desc->num_periods;
	tdc->tx_buf_pos = pos;

	return desc->buf_len - (periods_remaining * desc->period_len);
}
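/*
 * Illustrative numbers for the wrap-around handling above: XFER_STATUS
 * is a 16-bit free-running count of completed periods, so max is
 * 0x10000. If the previous snapshot was tx_buf_pos = 0xfffe and the
 * register now reads 2, the counter has wrapped and tx_buf_count
 * advances by 2 + (0x10000 - 0xfffe) = 4 periods.
 */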
static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
	struct tegra_adma_chan *tdc = dev_id;
	unsigned long status;

	spin_lock(&tdc->vc.lock);

	status = tegra_adma_irq_clear(tdc);
	if (status == 0 || !tdc->desc) {
		spin_unlock(&tdc->vc.lock);
		return IRQ_NONE;
	}

	vchan_cyclic_callback(&tdc->desc->vd);

	spin_unlock(&tdc->vc.lock);

	return IRQ_HANDLED;
}

static void tegra_adma_issue_pending(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (vchan_issue_pending(&tdc->vc)) {
		if (!tdc->desc)
			tegra_adma_start(tdc);
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);
}

static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
{
	u32 csts;

	csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
	csts &= ADMA_CH_STATUS_XFER_PAUSED;

	return csts ? true : false;
}

static int tegra_adma_pause(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	int dcnt = 10;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	/* Up to 10 polls, 20 us apart, for the in-flight burst to drain */
	while (dcnt-- && !tegra_adma_is_paused(tdc))
		udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);

	if (dcnt < 0) {
		dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
		return -EBUSY;
	}

	return 0;
}

static int tegra_adma_resume(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = tdc->desc;
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;

	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
	ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);

	return 0;
}

static int tegra_adma_terminate_all(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&tdc->vc.lock, flags);

	if (tdc->desc)
		tegra_adma_stop(tdc);

	tegra_adma_request_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);
	vchan_dma_desc_free_list(&tdc->vc, &head);

	return 0;
}

static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&tdc->vc.lock, flags);

	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
		desc = to_tegra_adma_desc(&vd->tx);
		residual = desc->ch_regs.tc;
	} else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
		residual = tegra_adma_get_residue(tdc);
	} else {
		residual = 0;
	}

	spin_unlock_irqrestore(&tdc->vc.lock, flags);

	dma_set_residue(txstate, residual);

	return ret;
}
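/*
 * Burst-size encodings differ across generations: Tegra210 programs
 * log2 of the word count (fls(8) == 4 for an 8-word burst), whereas
 * Tegra186 and later program the word count minus one (8 -> 7). Both
 * helpers below fall back to ADMA_CH_CONFIG_MAX_BURST_SIZE for zero
 * or out-of-range requests.
 */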
static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}

static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
{
	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;

	return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
}

static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
				      struct tegra_adma_desc *desc,
				      dma_addr_t buf_addr,
				      enum dma_transfer_direction direction)
{
	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
	const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
	unsigned int burst_size, adma_dir, fifo_size_shift;

	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
		return -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		fifo_size_shift = ADMA_CH_TX_FIFO_SIZE_SHIFT;
		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
		burst_size = tdc->sconfig.dst_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_tx_shift);
		ch_regs->src_addr = buf_addr;
		break;

	case DMA_DEV_TO_MEM:
		fifo_size_shift = ADMA_CH_RX_FIFO_SIZE_SHIFT;
		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
		burst_size = tdc->sconfig.src_maxburst;
		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
		ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
						      cdata->ch_req_mask,
						      cdata->ch_req_rx_shift);
		ch_regs->trg_addr = buf_addr;
		break;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}

	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
			 ADMA_CH_CTRL_MODE_CONTINUOUS |
			 ADMA_CH_CTRL_FLOWCTRL_EN;
	ch_regs->config |= cdata->adma_get_burst_config(burst_size);
	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
	if (cdata->has_outstanding_reqs)
		ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);

	/*
	 * 'sreq_index' represents the current ADMAIF channel number and as per
	 * HW recommendation its FIFO size should match with the corresponding
	 * ADMA channel.
	 *
	 * ADMA FIFO size is set as per below (based on default ADMAIF channel
	 * FIFO sizes):
	 *	fifo_size = 0x2 (sreq_index > sreq_index_offset)
	 *	fifo_size = 0x3 (sreq_index <= sreq_index_offset)
	 */
	if (tdc->sreq_index > cdata->sreq_index_offset)
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(2, cdata->ch_fifo_size_mask,
					      fifo_size_shift);
	else
		ch_regs->fifo_ctrl =
			ADMA_CH_REG_FIELD_VAL(3, cdata->ch_fifo_size_mask,
					      fifo_size_shift);

	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;

	return tegra_adma_request_alloc(tdc, direction);
}

static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}
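/*
 * A minimal, hypothetical client-side sketch of driving the cyclic
 * prep callback above through the generic dmaengine API (fifo_phys,
 * buf_dma, buf_len, period_len and period_done are placeholders):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;	(runs once per completed period)
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */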
static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_resume_and_get(tdc2dev(tdc));
	if (ret < 0) {
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}

static void tegra_adma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	tegra_adma_terminate_all(dc);
	vchan_free_chan_resources(&tdc->vc);
	tasklet_kill(&tdc->vc.task);
	free_irq(tdc->irq, tdc);
	pm_runtime_put(tdc2dev(tdc));

	tdc->sreq_index = 0;
	tdc->sreq_dir = DMA_TRANS_NONE;
}
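/*
 * The DMA specifier carries a single cell: the AHUB/ADMAIF request
 * index stored in sreq_index below. A hypothetical client node would
 * reference it as:
 *
 *	dmas = <&adma 1>, <&adma 1>;
 *	dma-names = "rx", "tx";
 */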
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_adma *tdma = ofdma->of_dma_data;
	struct tegra_adma_chan *tdc;
	struct dma_chan *chan;
	unsigned int sreq_index;

	if (dma_spec->args_count != 1)
		return NULL;

	sreq_index = dma_spec->args[0];

	if (sreq_index == 0) {
		dev_err(tdma->dev, "DMA request must not be 0\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_adma_chan(chan);
	tdc->sreq_index = sreq_index;

	return chan;
}

static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int i;

	if (tdma->base_addr)
		tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);

	if (!tdma->global_cmd)
		goto clk_disable;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;

		ch_reg = &tdc->ch_regs;
		ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
		/* skip if channel is not active */
		if (!ch_reg->cmd)
			continue;
		ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
		ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
		ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
		ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
		ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
		ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
	}

clk_disable:
	clk_disable_unprepare(tdma->ahub_clk);

	return 0;
}

static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
	struct tegra_adma *tdma = dev_get_drvdata(dev);
	struct tegra_adma_chan_regs *ch_reg;
	struct tegra_adma_chan *tdc;
	int ret, i;

	ret = clk_prepare_enable(tdma->ahub_clk);
	if (ret) {
		dev_err(dev, "ahub clk_enable failed: %d\n", ret);
		return ret;
	}

	if (tdma->base_addr) {
		tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
		if (tdma->cdata->set_global_pg_config)
			tdma->cdata->set_global_pg_config(tdma);
	}

	if (!tdma->global_cmd)
		return 0;

	for (i = 0; i < tdma->nr_channels; i++) {
		tdc = &tdma->channels[i];
		/* skip for reserved channels */
		if (!tdc->tdma)
			continue;
		ch_reg = &tdc->ch_regs;
		/* skip if channel was not active earlier */
		if (!ch_reg->cmd)
			continue;
		tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
		tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
		tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
		tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
		tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
		tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
		tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
	}

	return 0;
}
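/*
 * Per-SoC layout notes, as reflected in the tables below: Tegra210
 * keeps the global registers at offset 0xc00 inside a single register
 * region, while Tegra186 and later place them at the base of a
 * separate "global" region and carve the channel space into 64 KiB
 * pages (ch_base_offset = 0x10000).
 */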
static const struct tegra_adma_chip_data tegra210_chip_data = {
	.adma_get_burst_config	= tegra210_adma_get_burst_config,
	.global_reg_offset	= 0xc00,
	.global_int_clear	= 0x20,
	.ch_req_tx_shift	= 28,
	.ch_req_rx_shift	= 24,
	.ch_base_offset		= 0,
	.ch_req_mask		= 0xf,
	.ch_req_max		= 10,
	.ch_reg_size		= 0x80,
	.nr_channels		= 22,
	.ch_fifo_size_mask	= 0xf,
	.sreq_index_offset	= 2,
	.has_outstanding_reqs	= false,
	.set_global_pg_config	= NULL,
};

static const struct tegra_adma_chip_data tegra186_chip_data = {
	.adma_get_burst_config	= tegra186_adma_get_burst_config,
	.global_reg_offset	= 0,
	.global_int_clear	= 0x402c,
	.ch_req_tx_shift	= 27,
	.ch_req_rx_shift	= 22,
	.ch_base_offset		= 0x10000,
	.ch_req_mask		= 0x1f,
	.ch_req_max		= 20,
	.ch_reg_size		= 0x100,
	.nr_channels		= 32,
	.ch_fifo_size_mask	= 0x1f,
	.sreq_index_offset	= 4,
	.has_outstanding_reqs	= true,
	.set_global_pg_config	= tegra186_adma_global_page_config,
};

static const struct of_device_id tegra_adma_of_match[] = {
	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
	{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);

static int tegra_adma_probe(struct platform_device *pdev)
{
	const struct tegra_adma_chip_data *cdata;
	struct tegra_adma *tdma;
	struct resource *res_page, *res_base;
	int ret, i;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "device match data not found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->cdata = cdata;
	tdma->nr_channels = cdata->nr_channels;
	platform_set_drvdata(pdev, tdma);

	res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
	if (res_page) {
		tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
		if (IS_ERR(tdma->ch_base_addr))
			return PTR_ERR(tdma->ch_base_addr);

		res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
		if (res_base) {
			resource_size_t page_offset, page_no;
			unsigned int ch_base_offset;

			if (res_page->start < res_base->start)
				return -EINVAL;

			page_offset = res_page->start - res_base->start;
			ch_base_offset = cdata->ch_base_offset;
			if (!ch_base_offset)
				return -EINVAL;

			page_no = div_u64(page_offset, ch_base_offset);
			if (!page_no || page_no > INT_MAX)
				return -EINVAL;

			tdma->ch_page_no = page_no - 1;
			tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
			if (IS_ERR(tdma->base_addr))
				return PTR_ERR(tdma->base_addr);
		}
	} else {
		/* If no 'page' region is present, the 'reg' DT binding is the legacy one */
		res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res_base)
			return -ENODEV;

		tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
		if (IS_ERR(tdma->base_addr))
			return PTR_ERR(tdma->base_addr);

		tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
	}

	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
	if (IS_ERR(tdma->ahub_clk)) {
		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
		return PTR_ERR(tdma->ahub_clk);
	}

	tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
					   BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
					   GFP_KERNEL);
	if (!tdma->dma_chan_mask)
		return -ENOMEM;

	/* Enable all channels by default */
	bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
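	/*
	 * Honour the optional "dma-channel-mask" DT property. A
	 * hypothetical node reserving channels 0-3 for another agent
	 * would carry:
	 *
	 *	dma-channel-mask = <0xfffffff0>;
	 */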
	ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
					 (u32 *)tdma->dma_chan_mask,
					 BITS_TO_U32(tdma->nr_channels));
	if (ret < 0 && (ret != -EINVAL)) {
		dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
		return ret;
	}

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < tdma->nr_channels; i++) {
		struct tegra_adma_chan *tdc = &tdma->channels[i];

		/* skip for reserved channels */
		if (!test_bit(i, tdma->dma_chan_mask))
			continue;

		tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);

		tdc->irq = of_irq_get(pdev->dev.of_node, i);
		if (tdc->irq <= 0) {
			ret = tdc->irq ?: -ENXIO;
			goto irq_dispose;
		}

		vchan_init(&tdc->vc, &tdma->dma_dev);
		tdc->vc.desc_free = tegra_adma_desc_free;
		tdc->tdma = tdma;
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		goto rpm_disable;

	ret = tegra_adma_init(tdma);
	if (ret)
		goto rpm_put;

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_adma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_adma_free_chan_resources;
	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
	tdma->dma_dev.device_config = tegra_adma_slave_config;
	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_pause = tegra_adma_pause;
	tdma->dma_dev.device_resume = tegra_adma_resume;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
		goto rpm_put;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
		goto dma_remove;
	}

	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
		 tdma->nr_channels);

	return 0;

dma_remove:
	dma_async_device_unregister(&tdma->dma_dev);
rpm_put:
	pm_runtime_put_sync(&pdev->dev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
irq_dispose:
	while (--i >= 0)
		irq_dispose_mapping(tdma->channels[i].irq);

	return ret;
}

static void tegra_adma_remove(struct platform_device *pdev)
{
	struct tegra_adma *tdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->nr_channels; ++i) {
		if (tdma->channels[i].irq)
			irq_dispose_mapping(tdma->channels[i].irq);
	}

	pm_runtime_disable(&pdev->dev);
}

static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
			   tegra_adma_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};

static struct platform_driver tegra_admac_driver = {
	.driver = {
		.name	= "tegra-adma",
		.pm	= &tegra_adma_dev_pm_ops,
		.of_match_table = tegra_adma_of_match,
	},
	.probe		= tegra_adma_probe,
	.remove		= tegra_adma_remove,
};

module_platform_driver(tegra_admac_driver);

MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
MODULE_LICENSE("GPL v2");