// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can run in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */

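/*
 * Illustrative only (not part of the driver): a rough sketch of how a
 * dmaengine consumer might drive one H2C channel exposed by this driver.
 * The device handle, channel name, scatterlist and callback below are
 * hypothetical; the calls are the standard dmaengine slave API.
 *
 *	struct dma_slave_config cfg = { .dst_addr = card_ddr_base };
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "h2c");	// name resolved via the filter map
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_cb;		// hypothetical completion callback
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
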
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_MAX_REG_OFFSET,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void *virt_addr;
	dma_addr_t dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 * @last_interrupt: Completed when the last interrupt after a stop request
 *		    has been handled
 * @stop_requested: Whether the channel has been asked to stop
 */
struct xdma_chan {
	struct virt_dma_chan vchan;
	void *xdev_hdl;
	u32 base;
	struct dma_pool *desc_pool;
	bool busy;
	enum dma_transfer_direction dir;
	struct dma_slave_config cfg;
	u32 irq;
	struct completion last_interrupt;
	bool stop_requested;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @interleaved_dma: Interleaved DMA transfer
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 * @frames_left: Number of frames left in interleaved DMA transfer
 * @error: tx error flag
 */
struct xdma_desc {
	struct virt_dma_desc vdesc;
	struct xdma_chan *chan;
	enum dma_transfer_direction dir;
	struct xdma_desc_block *desc_blocks;
	u32 dblk_num;
	u32 desc_num;
	u32 completed_desc_num;
	bool cyclic;
	bool interleaved_dma;
	u32 periods;
	u32 period_size;
	u32 frames_left;
	bool error;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQ assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device *pdev;
	struct dma_device dma_dev;
	struct regmap *rmap;
	struct xdma_chan *h2c_chans;
	struct xdma_chan *c2h_chans;
	u32 h2c_chan_num;
	u32 c2h_chan_num;
	u32 irq_start;
	u32 irq_num;
	u32 status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	struct xdma_hw_desc *desc;
	int i;

	block = sw_desc->desc_blocks;
	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
	}
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	desc->next_desc = cpu_to_le64(block->dma_addr);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	u32 control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	sw_desc->error = false;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

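/*
 * Worked example (informational), assuming XDMA_DESC_ADJACENT is 32 as
 * defined in xdma-regs.h: a request needing 70 hardware descriptors is
 * spread over DIV_ROUND_UP(70, 32) = 3 blocks from the descriptor pool;
 * blocks 0 and 1 carry 32 adjacent descriptors each and block 2 carries
 * the remaining 6. xdma_link_sg_desc_blocks() or
 * xdma_link_cyclic_desc_blocks() then chains the blocks (or closes the
 * ring) through the hardware next_desc pointers.
 */
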
/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Return an error if there is no submitted descriptor or the channel
	 * is still busy. The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	xchan->stop_requested = false;
	reinit_completion(&xchan->last_interrupt);

	return 0;
}

/**
 * xdma_xfer_stop - Stop DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
	struct xdma_device *xdev = xchan->xdev_hdl;

	/* clear run stop bit to prevent any further auto-triggering */
	return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			    CHAN_CTRL_RUN_STOP);
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if it is available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "does not probe any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;
		xchan->stop_requested = false;
		init_completion(&xchan->last_interrupt);

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_terminate_all - Terminate all transactions
 * @chan: DMA channel pointer
 */
static int xdma_terminate_all(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	xdma_xfer_stop(xdma_chan);

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	xdma_chan->busy = false;
	xdma_chan->stop_requested = true;
	vd = vchan_next_desc(&xdma_chan->vchan);
	if (vd) {
		list_del(&vd->node);
		dma_cookie_complete(&vd->tx);
		vchan_terminate_vdesc(vd);
	}
	vchan_get_all_descriptors(&xdma_chan->vchan, &head);
	list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return 0;
}

/**
 * xdma_synchronize - Synchronize terminated transactions
 * @chan: DMA channel pointer
 */
static void xdma_synchronize(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	int st = 0;

	/* If the engine continues running, wait for the last interrupt */
	regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
	if (st & XDMA_CHAN_STATUS_BUSY)
		wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));

	vchan_synchronize(&xdma_chan->vchan);
}

/**
 * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
 *		       More than one descriptor will be used if the size is bigger
 *		       than XDMA_DESC_BLEN_MAX.
 * @sw_desc: Descriptor container
 * @src_addr: First value for the ->src_addr field
 * @dst_addr: First value for the ->dst_addr field
 * @size: Size of the contiguous memory block
 * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
 *
 * Return: Number of hardware descriptors filled for this chunk.
 */
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
				  u64 dst_addr, u32 size, u32 filled_descs_num)
{
	u32 left = size, len, desc_num = filled_descs_num;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;

	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
	desc = dblk->virt_addr;
	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
	do {
		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
		/* set hardware descriptor */
		desc->bytes = cpu_to_le32(len);
		desc->src_addr = cpu_to_le64(src_addr);
		desc->dst_addr = cpu_to_le64(dst_addr);
		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
			desc = (++dblk)->virt_addr;
		else
			desc++;

		src_addr += len;
		dst_addr += len;
		left -= len;
	} while (left);

	return desc_num - filled_descs_num;
}

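/*
 * Worked example (informational), assuming XDMA_DESC_BLEN_MAX is
 * (1 << 28) - 1 bytes as defined in xdma-regs.h: a 300 MiB contiguous
 * chunk is covered by two hardware descriptors, the first carrying
 * XDMA_DESC_BLEN_MAX bytes and the second the remainder, with
 * src_addr/dst_addr advanced by the first descriptor's length.
 */
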
/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u64 addr, dev_addr, *src, *dst;
	u32 desc_num, i;
	struct scatterlist *sg;

	desc_num = sg_nents_for_dma(sgl, sg_len, XDMA_DESC_BLEN_MAX);
	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;
	sw_desc->cyclic = false;
	sw_desc->interleaved_dma = false;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
		dev_addr += sg_dma_len(sg);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
		     size_t size, size_t period_size,
		     enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	unsigned int periods = size / period_size;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u64 addr, dev_addr, *src, *dst;
	u32 desc_num;
	unsigned int i;

	/*
	 * Simplify the whole logic by preventing an abnormally high number of
	 * periods and period size.
	 */
	if (period_size > XDMA_DESC_BLEN_MAX) {
		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
		return NULL;
	}

	if (periods > XDMA_DESC_ADJACENT) {
		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
		return NULL;
	}

	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
	if (!sw_desc)
		return NULL;

	sw_desc->periods = periods;
	sw_desc->period_size = period_size;
	sw_desc->dir = dir;
	sw_desc->interleaved_dma = false;

	addr = address;
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for (i = 0; i < periods; i++) {
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
		addr += period_size;
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

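/*
 * Illustrative only: a consumer streaming C2H data into a ring buffer could
 * use the cyclic API roughly as below (buffer and callback names are
 * hypothetical), keeping period_size and the period count within the limits
 * checked above:
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, ring_dma_addr, ring_size,
 *				       period_size, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_done_cb;	// invoked once per completed period
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
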
/**
 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
 * @chan: DMA channel
 * @xt: DMA transfer template
 * @flags: tx flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	int i;
	u32 desc_num = 0, period_size = 0;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_chan *xchan = to_xdma_chan(chan);
	struct xdma_desc *sw_desc;
	u64 src_addr, dst_addr;

	for (i = 0; i < xt->frame_size; ++i)
		desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xchan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = xt->dir;
	sw_desc->interleaved_dma = true;
	sw_desc->cyclic = flags & DMA_PREP_REPEAT;
	sw_desc->frames_left = xt->numf;
	sw_desc->periods = xt->numf;

	desc_num = 0;
	src_addr = xt->src_start;
	dst_addr = xt->dst_start;
	for (i = 0; i < xt->frame_size; ++i) {
		desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
		src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
							xt->sgl[i].size : 0);
		dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
							xt->sgl[i].size : 0);
		period_size += xt->sgl[i].size;
	}
	sw_desc->period_size = period_size;

	tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
	if (tx_desc)
		return tx_desc;

	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (!vd)
		goto out;

	desc = to_xdma_desc(vd);
	if (desc->error) {
		ret = DMA_ERROR;
	} else if (desc->cyclic) {
		period_idx = desc->completed_desc_num % desc->periods;
		residue = (desc->periods - period_idx) * desc->period_size;
		dma_set_residue(state, residue);
	}
out:
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return ret;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct virt_dma_desc *vd, *next_vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;
	bool repeat_tx;

	spin_lock(&xchan->vchan.lock);

	if (xchan->stop_requested)
		complete(&xchan->last_interrupt);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	/* Clear-on-read the status register */
	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
	if (ret)
		goto out;

	desc = to_xdma_desc(vd);

	st &= XDMA_CHAN_STATUS_MASK;
	if ((st & XDMA_CHAN_ERROR_MASK) ||
	    !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
		desc->error = true;
		xdma_err(xdev, "channel error, status register value: 0x%x", st);
		goto out;
	}

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	if (desc->interleaved_dma) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;
		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
			xdma_xfer_start(xchan);
			goto out;
		}

		/* last desc of any frame */
		desc->frames_left--;
		if (desc->frames_left)
			goto out;

		/* last desc of the last frame */
		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
		if (next_vd)
			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
		if (repeat_tx) {
			desc->frames_left = desc->periods;
			desc->completed_desc_num = 0;
			vchan_cyclic_callback(vd);
		} else {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
		xdma_xfer_start(xchan);
	} else if (!desc->cyclic) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;

		/* if all data blocks are transferred, remove and complete the request */
		if (desc->completed_desc_num == desc->desc_num) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			goto out;
		}

		if (desc->completed_desc_num > desc->desc_num ||
		    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
			goto out;

		/* transfer the rest of data */
		xdma_xfer_start(xchan);
	} else {
		desc->completed_desc_num = complete_desc_num;
		vchan_cyclic_callback(vd);
	}

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

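/*
 * Worked example (informational), assuming XDMA_IRQ_VEC_SHIFT is 8 as
 * defined in xdma-regs.h: with irq_start = 0 and irq_num = 6, the first
 * vector register is written with 0x03020100 (vectors 0..3, one per byte)
 * and the second with 0x00000504 (vectors 4 and 5).
 */
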
/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);

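/*
 * Illustrative only: a client driver sitting on top of this platform device
 * (typically the PCIe driver that created it) would wire up a user interrupt
 * roughly like this; my_user_isr and priv are hypothetical:
 *
 *	int irq = xdma_get_user_irq(pdev, 0);	// user wire index 0
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, my_user_isr, 0, "my-user-irq", priv);
 *	if (ret)
 *		return ret;
 *	ret = xdma_enable_user_irq(pdev, irq);	// unmask the wire in the XDMA core
 */
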
/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}

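/*
 * Illustrative only: the "xdma" platform device is normally created by the
 * PCIe driver that owns the endpoint. A rough sketch, with the BAR index,
 * vector count and channel count hypothetical:
 *
 *	struct xdma_platdata pdata = { .max_dma_channels = 4 };
 *	struct resource res[] = {
 *		DEFINE_RES_MEM(pci_resource_start(pci_dev, 2),
 *			       pci_resource_len(pci_dev, 2)),
 *		{ .start = irq_base, .end = irq_base + nr_vectors - 1,
 *		  .flags = IORESOURCE_IRQ },	// contiguous channel + user vectors
 *	};
 *
 *	xdma_pdev = platform_device_register_resndata(&pci_dev->dev, "xdma",
 *						      PLATFORM_DEVID_AUTO, res,
 *						      ARRAY_SIZE(res), &pdata,
 *						      sizeof(pdata));
 *
 * The probe below expects exactly that shape: one IORESOURCE_MEM region for
 * the XDMA register BAR and one IORESOURCE_IRQ region spanning all channel
 * and user interrupt vectors.
 */
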
/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = xdma_tx_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
	xdev->dma_dev.device_synchronize = xdma_synchronize;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;
	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
	xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, xdma_id_table);

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove		= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");