// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* ADM registers - calculated from channel number and security domain */
#define ADM_CHAN_MULTI		0x4
#define ADM_CI_MULTI		0x4
#define ADM_CRCI_MULTI		0x4
#define ADM_EE_MULTI		0x800
#define ADM_CHAN_OFFS(chan)	(ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee)		(ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci)		(ADM_CHAN_OFFS(ci))
#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee)		(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan)		(0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci)			(0x390 + (ci) * ADM_CI_MULTI)
#define ADM_GP_CTL			0x3d8
#define ADM_CRCI_CTL(crci, ee)		(0x400 + (crci) * ADM_CRCI_MULTI + \
					 ADM_EE_OFFS(ee))

/* channel status */
#define ADM_CH_STATUS_VALID	BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID	BIT(31)
#define ADM_CH_RSLT_ERR		BIT(3)
#define ADM_CH_RSLT_FLUSH	BIT(2)
#define ADM_CH_RSLT_TPD		BIT(1)

/* channel conf */
#define ADM_CH_CONF_SHADOW_EN		BIT(12)
#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result conf */
#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)

/* CRCI CTL */
#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
#define ADM_CRCI_CTL_RST	BIT(17)

/* CI configuration */
#define ADM_CI_RANGE_END(x)	((x) << 24)
#define ADM_CI_RANGE_START(x)	((x) << 16)
#define ADM_CI_BURST_4_WORDS	BIT(2)
#define ADM_CI_BURST_8_WORDS	BIT(3)

/* GP CTL */
#define ADM_GP_CTL_LP_EN	BIT(12)
#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)

/* Command pointer list entry */
#define ADM_CPLE_LP		BIT(31)
#define ADM_CPLE_CMD_PTR_LIST	BIT(29)

/* Command list entry */
#define ADM_CMD_LC		BIT(31)
#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE	0x0
#define ADM_CMD_TYPE_BOX	0x3

#define ADM_CRCI_MUX_SEL	BIT(4)
#define ADM_DESC_ALIGN		8
#define ADM_MAX_XFER		(SZ_64K - 1)
#define ADM_MAX_ROWS		(SZ_64K - 1)
#define ADM_MAX_CHANNELS	16
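/*
 * Worked example of the offset macros above (illustrative only): with
 * ADM_CHAN_MULTI = 0x4 and ADM_EE_MULTI = 0x800, the command pointer
 * register for channel 3 in execution environment 1 sits at
 * ADM_CH_CMD_PTR(3, 1) = 0x4 * 3 + 0x800 * 1 = 0x80c from the ADM base.
 */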
struct adm_desc_hw_box {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 row_len;
	u32 num_rows;
	u32 row_offset;
};

struct adm_desc_hw_single {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 len;
};

struct adm_async_desc {
	struct virt_dma_desc vd;
	struct adm_device *adev;

	size_t length;
	enum dma_transfer_direction dir;
	dma_addr_t dma_addr;
	size_t dma_len;

	void *cpl;
	dma_addr_t cp_addr;
	u32 crci;
	u32 mux;
	u32 blk_size;
};

struct adm_chan {
	struct virt_dma_chan vc;
	struct adm_device *adev;

	/* parsed from DT */
	u32 id;			/* channel id */

	struct adm_async_desc *curr_txd;
	struct dma_slave_config slave;
	u32 crci;
	u32 mux;
	struct list_head node;

	int error;
	int initialized;
};

static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
	return container_of(common, struct adm_chan, vc.chan);
}

struct adm_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct adm_chan *channels;

	u32 ee;

	struct clk *core_clk;
	struct clk *iface_clk;

	struct reset_control *clk_reset;
	struct reset_control *c0_reset;
	struct reset_control *c1_reset;
	struct reset_control *c2_reset;
	int irq;
};

/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 *
 * @chan: dma channel
 *
 * Free all allocated descriptors associated with this channel
 */
static void adm_free_chan(struct dma_chan *chan)
{
	/* free all queued descriptors */
	vchan_free_chan_resources(to_virt_chan(chan));
}

/**
 * adm_get_blksize - Get block size from burst value
 *
 * @burst: Burst size of transaction
 */
static int adm_get_blksize(unsigned int burst)
{
	int ret;

	switch (burst) {
	case 16:
	case 32:
	case 64:
	case 128:
		ret = ffs(burst >> 4) - 1;
		break;
	case 192:
		ret = 4;
		break;
	case 256:
		ret = 5;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @crci: CRCI value
 * @burst: Burst size of transaction
 * @direction: DMA transfer direction
 */
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
					struct scatterlist *sg, u32 crci,
					u32 burst,
					enum dma_transfer_direction direction)
{
	struct adm_desc_hw_box *box_desc = NULL;
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 rows, row_offset, crci_cmd;
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		crci_cmd = ADM_CMD_SRC_CRCI(crci);
		row_offset = burst;
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		crci_cmd = ADM_CMD_DST_CRCI(crci);
		row_offset = burst << 16;
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	while (remainder >= burst) {
		box_desc = desc;
		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
		box_desc->row_offset = row_offset;
		box_desc->src_addr = *src;
		box_desc->dst_addr = *dst;

		rows = remainder / burst;
		rows = min_t(u32, rows, ADM_MAX_ROWS);
		box_desc->num_rows = rows << 16 | rows;
		box_desc->row_len = burst << 16 | burst;

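		/*
		 * Note: only the memory-side address (mem_addr) advances from
		 * row to row; the peripheral address taken from the slave
		 * config is a FIFO and stays fixed.
		 */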
		*incr_addr += burst * rows;
		remainder -= burst * rows;
		desc += sizeof(*box_desc);
	}

	/* if leftover bytes, do one single descriptor */
	if (remainder) {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
		single_desc->len = remainder;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		desc += sizeof(*single_desc);

		if (sg_is_last(sg))
			single_desc->cmd |= ADM_CMD_LC;
	} else {
		if (box_desc && sg_is_last(sg))
			box_desc->cmd |= ADM_CMD_LC;
	}

	return desc;
}

/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @direction: DMA transfer direction
 */
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
					    struct scatterlist *sg,
					    enum dma_transfer_direction direction)
{
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	do {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		single_desc->len = (remainder > ADM_MAX_XFER) ?
				ADM_MAX_XFER : remainder;

		remainder -= single_desc->len;
		*incr_addr += single_desc->len;
		desc += sizeof(*single_desc);
	} while (remainder);

	/* set last command if this is the end of the whole transaction */
	if (sg_is_last(sg))
		single_desc->cmd |= ADM_CMD_LC;

	return desc;
}

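/*
 * A transaction prepared by adm_prep_slave_sg() below occupies a single
 * DMA-mapped buffer laid out as: one 8-byte-aligned command pointer list
 * entry (flagged with ADM_CPLE_LP as the last entry), followed by the
 * 8-byte-aligned box/single command descriptors built by the two helpers
 * above, with ADM_CMD_LC set on the final command.
 */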
/**
 * adm_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long flags,
							 void *context)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;
	struct scatterlist *sg;
	dma_addr_t cple_addr;
	u32 i, burst;
	u32 single_count = 0, box_count = 0, crci = 0;
	void *desc;
	u32 *cple;
	int blk_size = 0;

	if (!is_slave_direction(direction)) {
		dev_err(adev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* get burst value from slave configuration */
	burst = (direction == DMA_MEM_TO_DEV) ?
		achan->slave.dst_maxburst :
		achan->slave.src_maxburst;

	/* if using flow control, validate burst and crci values */
	if (achan->slave.device_fc) {
		blk_size = adm_get_blksize(burst);
		if (blk_size < 0) {
			dev_err(adev->dev, "invalid burst value: %d\n",
				burst);
			return NULL;
		}

		crci = achan->crci & 0xf;
		if (!crci || achan->crci > 0x1f) {
			dev_err(adev->dev, "invalid crci value\n");
			return NULL;
		}
	}

	/* iterate through sgs and compute allocation size of structures */
	for_each_sg(sgl, sg, sg_len, i) {
		if (achan->slave.device_fc) {
			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
						  ADM_MAX_ROWS);
			if (sg_dma_len(sg) % burst)
				single_count++;
		} else {
			single_count += DIV_ROUND_UP(sg_dma_len(sg),
						     ADM_MAX_XFER);
		}
	}

	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
	if (!async_desc) {
		dev_err(adev->dev, "not enough memory for async_desc struct\n");
		return NULL;
	}

	async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
			      box_count * sizeof(struct adm_desc_hw_box) +
			      sizeof(*cple) + 2 * ADM_DESC_ALIGN;

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl) {
		dev_err(adev->dev, "not enough memory for cpl struct\n");
		goto free;
	}

	async_desc->adev = adev;

	/* both command list entry and descriptors must be 8 byte aligned */
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->length += sg_dma_len(sg);

		if (achan->slave.device_fc)
			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
							  burst, direction);
		else
			desc = adm_process_non_fc_descriptors(achan, desc, sg,
							      direction);
	}

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
		dev_err(adev->dev, "dma mapping error for cpl\n");
		goto free;
	}

	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

	/* init cmd list */
	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
				DMA_TO_DEVICE);
	*cple = ADM_CPLE_LP;
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
				   DMA_TO_DEVICE);

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
	/* kfree(NULL) is a no-op, so this also covers the failed cpl alloc */
	kfree(async_desc->cpl);
	kfree(async_desc);
	return NULL;
}

/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions, aborts current transaction
 * No callbacks are done
 *
 */
static int adm_terminate_all(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&achan->vc.lock, flags);
	vchan_get_all_descriptors(&achan->vc, &head);

	/* send flush command to terminate current transaction */
	writel_relaxed(0x0,
		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	vchan_dma_desc_free_list(&achan->vc, &head);

	return 0;
}

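/*
 * Illustrative client usage (not part of this driver; names and values
 * below are hypothetical): a flow-controlled peripheral hands its CRCI to
 * adm_slave_config() via the qcom_adm_peripheral_config that accompanies
 * dma_slave_config, e.g.:
 *
 *	struct qcom_adm_peripheral_config periph_conf = { .crci = 1 };
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_dma_addr,
 *		.src_maxburst = 16,
 *		.device_fc = true,
 *		.peripheral_config = &periph_conf,
 *		.peripheral_size = sizeof(periph_conf),
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */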
static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
	unsigned long flag;

	spin_lock_irqsave(&achan->vc.lock, flag);
	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
	if (cfg->peripheral_size == sizeof(*config))
		achan->crci = config->crci;
	spin_unlock_irqrestore(&achan->vc.lock, flag);

	return 0;
}

/**
 * adm_start_dma - start next transaction
 * @achan: ADM dma channel
 */
static void adm_start_dma(struct adm_chan *achan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;

	lockdep_assert_held(&achan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	/* write next command list out to the CMD FIFO */
	async_desc = container_of(vd, struct adm_async_desc, vd);
	achan->curr_txd = async_desc;

	/* reset channel error */
	achan->error = 0;

	if (!achan->initialized) {
		/* enable interrupts */
		writel(ADM_CH_CONF_SHADOW_EN |
		       ADM_CH_CONF_PERM_MPU_CONF |
		       ADM_CH_CONF_MPU_DISABLE |
		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
		       adev->regs + ADM_CH_CONF(achan->id));

		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		achan->initialized = 1;
	}

	/* set the crci block size if this transaction requires CRCI */
	if (async_desc->crci) {
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
	}

	/* make sure IRQ enable doesn't get reordered */
	wmb();

	/* write next command list out to the CMD FIFO */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}

/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the ADM controller
 */
static irqreturn_t adm_dma_irq(int irq, void *data)
{
	struct adm_device *adev = data;
	u32 srcs, i;
	struct adm_async_desc *async_desc;
	unsigned long flags;

	srcs = readl_relaxed(adev->regs +
			     ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		struct adm_chan *achan = &adev->channels[i];
		u32 status, result;

		if (srcs & BIT(i)) {
			status = readl_relaxed(adev->regs +
					       ADM_CH_STATUS_SD(i, adev->ee));

			/* if no result present, skip */
			if (!(status & ADM_CH_STATUS_VALID))
				continue;

			result = readl_relaxed(adev->regs +
					       ADM_CH_RSLT(i, adev->ee));

			/* no valid results, skip */
			if (!(result & ADM_CH_RSLT_VALID))
				continue;

			/* flag error if transaction was flushed or failed */
			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
				achan->error = 1;

			spin_lock_irqsave(&achan->vc.lock, flags);
			async_desc = achan->curr_txd;

			achan->curr_txd = NULL;

			if (async_desc) {
				vchan_cookie_complete(&async_desc->vd);

				/* kick off next DMA */
				adm_start_dma(achan);
			}

			spin_unlock_irqrestore(&achan->vc.lock, flags);
		}
	}

	return IRQ_HANDLED;
}

/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct adm_async_desc, vd)->length;

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
	 * anything in between.
	 */
	dma_set_residue(txstate, residue);

	if (achan->error)
		return DMA_ERROR;

	return ret;
}

/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA
 */
static void adm_issue_pending(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&achan->vc.lock, flags);

	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
		adm_start_dma(achan);
	spin_unlock_irqrestore(&achan->vc.lock, flags);
}

/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);
	kfree(async_desc);
}

static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
			     u32 index)
{
	achan->id = index;
	achan->adev = adev;

	vchan_init(&achan->vc, &adev->common);
	achan->vc.desc_free = adm_dma_free_desc;
}

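/*
 * Illustrative device tree usage (node name, unit address and cell values
 * are hypothetical): the first cell selects the ADM channel and the
 * optional second cell carries the CRCI number, e.g.:
 *
 *	nand-controller@1ac00000 {
 *		dmas = <&adm_dma 3 0xc>;
 *		dma-names = "rxtx";
 *	};
 */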
/**
 * adm_dma_xlate
 * @dma_spec: pointer to DMA specifier as found in the device tree
 * @ofdma: pointer to DMA controller data
 *
 * This can use either 1-cell or 2-cell formats: the first cell identifies
 * the DMA channel, while the optional second cell contains the crci value.
 *
 * Returns pointer to appropriate dma channel on success or NULL on error.
 */
static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct dma_device *dev = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate = NULL;
	struct adm_chan *achan;

	if (!dev || dma_spec->args_count > 2)
		return NULL;

	list_for_each_entry(chan, &dev->channels, device_node)
		if (chan->chan_id == dma_spec->args[0]) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	achan = to_adm_chan(candidate);
	if (dma_spec->args_count == 2)
		achan->crci = dma_spec->args[1];
	else
		achan->crci = 0;

	return dma_get_slave_channel(candidate);
}

static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);

	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
			ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/* initialize dmaengine apis */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);

	return ret;
}

static int adm_dma_remove(struct platform_device *pdev)
{
	struct adm_device *adev = platform_get_drvdata(pdev);
	struct adm_chan *achan;
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&adev->common);

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		achan = &adev->channels[i];

		/* mask IRQs for this channel/EE pair */
		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		tasklet_kill(&adev->channels[i].vc.task);
		adm_terminate_all(&adev->channels[i].vc.chan);
	}

	devm_free_irq(adev->dev, adev->irq, adev);

	clk_disable_unprepare(adev->core_clk);
	clk_disable_unprepare(adev->iface_clk);

	return 0;
}

static const struct of_device_id adm_of_match[] = {
	{ .compatible = "qcom,adm", },
	{}
};
MODULE_DEVICE_TABLE(of, adm_of_match);

static struct platform_driver adm_dma_driver = {
	.probe = adm_dma_probe,
	.remove = adm_dma_remove,
	.driver = {
		.name = "adm-dma-engine",
		.of_match_table = adm_of_match,
	},
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");