// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx ZynqMP DMA Engine
 *
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"

/* Register Offsets */
#define ZYNQMP_DMA_ISR			(chan->irq_offset + 0x100)
#define ZYNQMP_DMA_IMR			(chan->irq_offset + 0x104)
#define ZYNQMP_DMA_IER			(chan->irq_offset + 0x108)
#define ZYNQMP_DMA_IDS			(chan->irq_offset + 0x10c)
#define ZYNQMP_DMA_CTRL0		0x110
#define ZYNQMP_DMA_CTRL1		0x114
#define ZYNQMP_DMA_DATA_ATTR		0x120
#define ZYNQMP_DMA_DSCR_ATTR		0x124
#define ZYNQMP_DMA_SRC_DSCR_WRD0	0x128
#define ZYNQMP_DMA_SRC_DSCR_WRD1	0x12C
#define ZYNQMP_DMA_SRC_DSCR_WRD2	0x130
#define ZYNQMP_DMA_SRC_DSCR_WRD3	0x134
#define ZYNQMP_DMA_DST_DSCR_WRD0	0x138
#define ZYNQMP_DMA_DST_DSCR_WRD1	0x13C
#define ZYNQMP_DMA_DST_DSCR_WRD2	0x140
#define ZYNQMP_DMA_DST_DSCR_WRD3	0x144
#define ZYNQMP_DMA_SRC_START_LSB	0x158
#define ZYNQMP_DMA_SRC_START_MSB	0x15C
#define ZYNQMP_DMA_DST_START_LSB	0x160
#define ZYNQMP_DMA_DST_START_MSB	0x164
#define ZYNQMP_DMA_TOTAL_BYTE		0x188
#define ZYNQMP_DMA_RATE_CTRL		0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT		0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT		0x194
#define ZYNQMP_DMA_CTRL2		0x200

/* Interrupt registers bit field definitions */
#define ZYNQMP_DMA_DONE			BIT(10)
#define ZYNQMP_DMA_AXI_WR_DATA		BIT(9)
#define ZYNQMP_DMA_AXI_RD_DATA		BIT(8)
#define ZYNQMP_DMA_AXI_RD_DST_DSCR	BIT(7)
#define ZYNQMP_DMA_AXI_RD_SRC_DSCR	BIT(6)
#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR	BIT(5)
#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR	BIT(4)
#define ZYNQMP_DMA_BYTE_CNT_OVRFL	BIT(3)
#define ZYNQMP_DMA_DST_DSCR_DONE	BIT(2)
#define ZYNQMP_DMA_INV_APB		BIT(0)

/* Control 0 register bit field definitions */
#define ZYNQMP_DMA_OVR_FETCH		BIT(7)
#define ZYNQMP_DMA_POINT_TYPE_SG	BIT(6)
#define ZYNQMP_DMA_RATE_CTRL_EN		BIT(3)

/* Control 1 register bit field definitions */
#define ZYNQMP_DMA_SRC_ISSUE		GENMASK(4, 0)

/* Data Attribute register bit field definitions */
#define ZYNQMP_DMA_ARBURST		GENMASK(27, 26)
#define ZYNQMP_DMA_ARCACHE		GENMASK(25, 22)
#define ZYNQMP_DMA_ARCACHE_OFST		22
#define ZYNQMP_DMA_ARQOS		GENMASK(21, 18)
#define ZYNQMP_DMA_ARQOS_OFST		18
#define ZYNQMP_DMA_ARLEN		GENMASK(17, 14)
#define ZYNQMP_DMA_ARLEN_OFST		14
#define ZYNQMP_DMA_AWBURST		GENMASK(13, 12)
#define ZYNQMP_DMA_AWCACHE		GENMASK(11, 8)
#define ZYNQMP_DMA_AWCACHE_OFST		8
#define ZYNQMP_DMA_AWQOS		GENMASK(7, 4)
#define ZYNQMP_DMA_AWQOS_OFST		4
#define ZYNQMP_DMA_AWLEN		GENMASK(3, 0)
#define ZYNQMP_DMA_AWLEN_OFST		0

/* Descriptor Attribute register bit field definitions */
#define ZYNQMP_DMA_AXCOHRNT		BIT(8)
#define ZYNQMP_DMA_AXCACHE		GENMASK(7, 4)
#define ZYNQMP_DMA_AXCACHE_OFST		4
#define ZYNQMP_DMA_AXQOS		GENMASK(3, 0)
#define ZYNQMP_DMA_AXQOS_OFST		0

/* Control register 2 bit field definitions */
#define ZYNQMP_DMA_ENABLE		BIT(0)

/* Buffer Descriptor definitions */
#define ZYNQMP_DMA_DESC_CTRL_STOP	0x10
#define ZYNQMP_DMA_DESC_CTRL_COMP_INT	0x4
#define ZYNQMP_DMA_DESC_CTRL_SIZE_256	0x2
#define ZYNQMP_DMA_DESC_CTRL_COHRNT	0x1
/* Interrupt Mask specific definitions */
#define ZYNQMP_DMA_INT_ERR	(ZYNQMP_DMA_AXI_RD_DATA | \
				 ZYNQMP_DMA_AXI_WR_DATA | \
				 ZYNQMP_DMA_AXI_RD_DST_DSCR | \
				 ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
				 ZYNQMP_DMA_INV_APB)
#define ZYNQMP_DMA_INT_OVRFL	(ZYNQMP_DMA_BYTE_CNT_OVRFL | \
				 ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
				 ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
#define ZYNQMP_DMA_INT_DONE	(ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK	(ZYNQMP_DMA_INT_DONE | \
					 ZYNQMP_DMA_INT_ERR | \
					 ZYNQMP_DMA_INT_OVRFL | \
					 ZYNQMP_DMA_DST_DSCR_DONE)

/* Max number of descriptors per channel */
#define ZYNQMP_DMA_NUM_DESCS		32

/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN	0x40000000

/* Max burst lengths */
#define ZYNQMP_DMA_MAX_DST_BURST_LEN	32768U
#define ZYNQMP_DMA_MAX_SRC_BURST_LEN	32768U

/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL		0xF

#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL	0x1F

#define ZYNQMP_DMA_IDS_DEFAULT_MASK	0xFFF

/* Bus width in bits */
#define ZYNQMP_DMA_BUS_WIDTH_64		64
#define ZYNQMP_DMA_BUS_WIDTH_128	128

#define ZDMA_PM_TIMEOUT			100

#define ZYNQMP_DMA_DESC_SIZE(chan)	(chan->desc_size)

#define to_chan(chan)		container_of(chan, struct zynqmp_dma_chan, \
					     common)
#define tx_to_desc(tx)		container_of(tx, struct zynqmp_dma_desc_sw, \
					     async_tx)

/* IRQ register offset for Versal Gen 2 */
#define IRQ_REG_OFFSET			0x308

/**
 * struct zynqmp_dma_desc_ll - Hw linked list descriptor
 * @addr: Buffer address
 * @size: Size of the buffer
 * @ctrl: Control word
 * @nxtdscraddr: Next descriptor base address
 * @rsvd: Reserved field, for hw internal use
 */
struct zynqmp_dma_desc_ll {
	u64 addr;
	u32 size;
	u32 ctrl;
	u64 nxtdscraddr;
	u64 rsvd;
};
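/*
 * Layout sketch of the coherent descriptor pool (derived from
 * zynqmp_dma_alloc_chan_resources() below; offsets assume the 32-byte
 * descriptor above): every software descriptor owns one src/dst pair of
 * hardware descriptors, laid out back to back:
 *
 *	desc_pool_p + i * 2 * ZYNQMP_DMA_DESC_SIZE(chan)	-> src ll desc
 *	desc_pool_p + i * 2 * ZYNQMP_DMA_DESC_SIZE(chan) + 32	-> dst ll desc
 */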
/**
 * struct zynqmp_dma_desc_sw - Per Transaction structure
 * @src: Source address for simple mode dma
 * @dst: Destination address for simple mode dma
 * @len: Transfer length for simple mode dma
 * @node: Node in the channel descriptor list
 * @tx_list: List head for the current transfer
 * @async_tx: Async transaction descriptor
 * @src_v: Virtual address of the src descriptor
 * @src_p: Physical address of the src descriptor
 * @dst_v: Virtual address of the dst descriptor
 * @dst_p: Physical address of the dst descriptor
 */
struct zynqmp_dma_desc_sw {
	u64 src;
	u64 dst;
	u32 len;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	struct zynqmp_dma_desc_ll *src_v;
	dma_addr_t src_p;
	struct zynqmp_dma_desc_ll *dst_v;
	dma_addr_t dst_p;
};

/**
 * struct zynqmp_dma_chan - Driver specific DMA channel structure
 * @zdev: Driver specific device structure
 * @regs: Control registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @free_list: Descriptors free
 * @active_list: Descriptors active
 * @sw_desc_pool: SW descriptor pool
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool_v: Virtual address of the allocated descriptor pool
 * @desc_pool_p: Physical address of the allocated descriptor pool
 * @desc_free_cnt: Descriptor available count
 * @dev: The dma device
 * @irq: Channel IRQ
 * @is_dmacoherent: Tells whether dma operations are coherent or not
 * @tasklet: Cleanup work after irq
 * @idle: Channel status
 * @desc_size: Size of the low level descriptor
 * @err: Channel has errors
 * @bus_width: Bus width
 * @src_burst_len: Source burst length
 * @dst_burst_len: Dest burst length
 * @irq_offset: Irq register offset
 */
struct zynqmp_dma_chan {
	struct zynqmp_dma_device *zdev;
	void __iomem *regs;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct zynqmp_dma_desc_sw *sw_desc_pool;
	struct list_head done_list;
	struct dma_chan common;
	void *desc_pool_v;
	dma_addr_t desc_pool_p;
	u32 desc_free_cnt;
	struct device *dev;
	int irq;
	bool is_dmacoherent;
	struct tasklet_struct tasklet;
	bool idle;
	size_t desc_size;
	bool err;
	u32 bus_width;
	u32 src_burst_len;
	u32 dst_burst_len;
	u32 irq_offset;
};

/**
 * struct zynqmp_dma_device - DMA device structure
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @clk_main: Pointer to main clock
 * @clk_apb: Pointer to apb clock
 */
struct zynqmp_dma_device {
	struct device *dev;
	struct dma_device common;
	struct zynqmp_dma_chan *chan;
	struct clk *clk_main;
	struct clk *clk_apb;
};

struct zynqmp_dma_config {
	u32 offset;
};

static const struct zynqmp_dma_config versal2_dma_config = {
	.offset = IRQ_REG_OFFSET,
};

static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
				     u64 value)
{
	lo_hi_writeq(value, chan->regs + reg);
}
/**
 * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
 * @chan: ZynqMP DMA channel pointer
 * @desc: Transaction descriptor pointer
 */
static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
					    struct zynqmp_dma_desc_sw *desc)
{
	dma_addr_t addr;

	addr = desc->src_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
	addr = desc->dst_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
}

/**
 * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
 * @chan: ZynqMP DMA channel pointer
 * @desc: Hw descriptor pointer
 */
static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
				       void *desc)
{
	struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;

	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
	hw++;
	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
}

/**
 * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Hw descriptor pointer
 * @src: Source buffer address
 * @dst: Destination buffer address
 * @len: Transfer length
 * @prev: Previous hw descriptor pointer
 */
static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
					 struct zynqmp_dma_desc_ll *sdesc,
					 dma_addr_t src, dma_addr_t dst,
					 size_t len,
					 struct zynqmp_dma_desc_ll *prev)
{
	struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;

	sdesc->size = ddesc->size = len;
	sdesc->addr = src;
	ddesc->addr = dst;

	sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
	if (chan->is_dmacoherent) {
		sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
		ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
	}

	if (prev) {
		dma_addr_t addr = chan->desc_pool_p +
			((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);

		ddesc = prev + 1;
		prev->nxtdscraddr = addr;
		ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
	}
}

/**
 * zynqmp_dma_init - Initialize the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
{
	u32 val;

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	val = readl(chan->regs + ZYNQMP_DMA_ISR);
	writel(val, chan->regs + ZYNQMP_DMA_ISR);

	if (chan->is_dmacoherent) {
		val = ZYNQMP_DMA_AXCOHRNT;
		val = (val & ~ZYNQMP_DMA_AXCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
	}

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	if (chan->is_dmacoherent) {
		val = (val & ~ZYNQMP_DMA_ARCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
		val = (val & ~ZYNQMP_DMA_AWCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
	}
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);

	/* Clear the interrupt accounting registers */
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	chan->idle = true;
}
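/*
 * Chaining on submit (informal sketch of zynqmp_dma_tx_submit() below):
 * the last pair already on pending_list gets its STOP bits cleared and
 * its next-descriptor pointers aimed at the new pair, so the engine can
 * flow from one transaction into the next without software re-arming:
 *
 *	prev src ll --nxtdscraddr--> new src ll
 *	prev dst ll --nxtdscraddr--> new dst ll
 */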
/**
 * zynqmp_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
	struct zynqmp_dma_desc_sw *desc, *new;
	dma_cookie_t cookie;
	unsigned long irqflags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&chan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	if (!list_empty(&chan->pending_list)) {
		desc = list_last_entry(&chan->pending_list,
				       struct zynqmp_dma_desc_sw, node);
		if (!list_empty(&desc->tx_list))
			desc = list_last_entry(&desc->tx_list,
					       struct zynqmp_dma_desc_sw, node);
		desc->src_v->nxtdscraddr = new->src_p;
		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
		desc->dst_v->nxtdscraddr = new->dst_p;
		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
	}

	list_add_tail(&new->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	return cookie;
}

/**
 * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
 * @chan: ZynqMP DMA channel pointer
 *
 * Return: The sw descriptor
 */
static struct zynqmp_dma_desc_sw *
zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	desc = list_first_entry(&chan->free_list,
				struct zynqmp_dma_desc_sw, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	INIT_LIST_HEAD(&desc->tx_list);
	/* Clear the src and dst descriptor memory */
	memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
	memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));

	return desc;
}

/**
 * zynqmp_dma_free_descriptor - Free the descriptor and its children
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Transaction descriptor pointer
 */
static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
				       struct zynqmp_dma_desc_sw *sdesc)
{
	struct zynqmp_dma_desc_sw *child, *next;

	chan->desc_free_cnt++;
	list_move_tail(&sdesc->node, &chan->free_list);
	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
		chan->desc_free_cnt++;
		list_move_tail(&child->node, &chan->free_list);
	}
}

/**
 * zynqmp_dma_free_desc_list - Free descriptors list
 * @chan: ZynqMP DMA channel pointer
 * @list: List to parse and delete the descriptor
 */
static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
				      struct list_head *list)
{
	struct zynqmp_dma_desc_sw *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		zynqmp_dma_free_descriptor(chan, desc);
}
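/*
 * Per-channel footprint with the defaults above (simple arithmetic,
 * assuming the 32-byte hardware descriptor): ZYNQMP_DMA_NUM_DESCS
 * software descriptors plus a coherent pool of
 * 2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS
 * = 2 * 32 * 32 = 2 KiB of hardware descriptors.
 */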
/**
 * zynqmp_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	struct zynqmp_dma_desc_sw *desc;
	int i, ret;

	ret = pm_runtime_resume_and_get(chan->dev);
	if (ret < 0)
		return ret;

	chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc),
				     GFP_KERNEL);
	if (!chan->sw_desc_pool)
		return -ENOMEM;

	chan->idle = true;
	chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;

	INIT_LIST_HEAD(&chan->free_list);

	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
		desc = chan->sw_desc_pool + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
		desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
		list_add_tail(&desc->node, &chan->free_list);
	}

	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
					       (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
					       ZYNQMP_DMA_NUM_DESCS),
					       &chan->desc_pool_p, GFP_KERNEL);
	if (!chan->desc_pool_v)
		return -ENOMEM;

	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
		desc = chan->sw_desc_pool + i;
		desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
					(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
		desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
		desc->src_p = chan->desc_pool_p +
				(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
		desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
	}

	return ZYNQMP_DMA_NUM_DESCS;
}

/**
 * zynqmp_dma_start - Start DMA channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
	writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	chan->idle = false;
	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}

/**
 * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
 * @chan: ZynqMP DMA channel pointer
 * @status: Interrupt status value
 */
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
	if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
		writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}

static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
	u32 val, burst_val;

	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
	val |= ZYNQMP_DMA_POINT_TYPE_SG;
	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	burst_val = __ilog2_u32(chan->src_burst_len);
	val = (val & ~ZYNQMP_DMA_ARLEN) |
		((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
	burst_val = __ilog2_u32(chan->dst_burst_len);
	val = (val & ~ZYNQMP_DMA_AWLEN) |
		((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}

/**
 * zynqmp_dma_device_config - Zynqmp dma device configuration
 * @dchan: DMA channel
 * @config: DMA device config
 *
 * Return: 0 always
 */
static int zynqmp_dma_device_config(struct dma_chan *dchan,
				    struct dma_slave_config *config)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	chan->src_burst_len = clamp(config->src_maxburst, 1U,
				    ZYNQMP_DMA_MAX_SRC_BURST_LEN);
	chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
				    ZYNQMP_DMA_MAX_DST_BURST_LEN);

	return 0;
}

/**
 * zynqmp_dma_start_transfer - Initiate the new transfer
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	if (!chan->idle)
		return;

	zynqmp_dma_config(chan);

	desc = list_first_entry_or_null(&chan->pending_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	zynqmp_dma_update_desc_to_ctrlr(chan, desc);
	zynqmp_dma_start(chan);
}
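/*
 * Completion path (informal sketch of the functions that follow):
 *
 *	zynqmp_dma_irq_handler()	-> tasklet_schedule()
 *	zynqmp_dma_do_tasklet()		-> reads ZYNQMP_DMA_IRQ_DST_ACCT and
 *					   moves that many descriptors from
 *					   active_list to done_list
 *	zynqmp_dma_chan_desc_cleanup()	-> invokes client callbacks and
 *					   recycles descriptors to free_list
 */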
/**
 * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
 * @chan: ZynqMP DMA channel
 */
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		zynqmp_dma_free_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	desc = list_first_entry_or_null(&chan->active_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);
}

/**
 * zynqmp_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_free_descriptors - Free channel descriptors
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_free_desc_list(chan, &chan->active_list);
	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
	zynqmp_dma_free_desc_list(chan, &chan->done_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	zynqmp_dma_free_descriptors(chan);
	dma_free_coherent(chan->dev,
			  (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
			  chan->desc_pool_v, chan->desc_pool_p);
	kfree(chan->sw_desc_pool);
	pm_runtime_mark_last_busy(chan->dev);
	pm_runtime_put_autosuspend(chan->dev);
}

/**
 * zynqmp_dma_reset - Reset the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_complete_descriptor(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
	zynqmp_dma_chan_desc_cleanup(chan);
	zynqmp_dma_free_descriptors(chan);

	zynqmp_dma_init(chan);
}
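/*
 * Interrupt handling (sketch): ISR is read, masked against IMR so that
 * only unmasked sources are acted on, and the raw value is written back
 * to ZYNQMP_DMA_ISR to acknowledge it (the same write-back-to-clear
 * sequence used in zynqmp_dma_init() above). DONE and error conditions
 * then defer the heavy lifting to the tasklet.
 */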
/**
 * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 isr, imr, status;
	irqreturn_t ret = IRQ_NONE;

	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
	status = isr & ~imr;

	writel(isr, chan->regs + ZYNQMP_DMA_ISR);
	if (status & ZYNQMP_DMA_INT_DONE) {
		tasklet_schedule(&chan->tasklet);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_DONE)
		chan->idle = true;

	if (status & ZYNQMP_DMA_INT_ERR) {
		chan->err = true;
		tasklet_schedule(&chan->tasklet);
		dev_err(chan->dev, "Channel %p has errors\n", chan);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_INT_OVRFL) {
		zynqmp_dma_handle_ovfl_int(chan, status);
		dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * zynqmp_dma_do_tasklet - Completion tasklet
 * @t: Pointer to the ZynqMP DMA channel structure
 */
static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
{
	struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
	u32 count;
	unsigned long irqflags;

	if (chan->err) {
		zynqmp_dma_reset(chan);
		chan->err = false;
		return;
	}

	spin_lock_irqsave(&chan->lock, irqflags);
	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	while (count) {
		zynqmp_dma_complete_descriptor(chan);
		count--;
	}
	spin_unlock_irqrestore(&chan->lock, irqflags);

	zynqmp_dma_chan_desc_cleanup(chan);

	if (chan->idle) {
		spin_lock_irqsave(&chan->lock, irqflags);
		zynqmp_dma_start_transfer(chan);
		spin_unlock_irqrestore(&chan->lock, irqflags);
	}
}

/**
 * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
 * @dchan: DMA channel pointer
 *
 * Return: Always '0'
 */
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	zynqmp_dma_free_descriptors(chan);

	return 0;
}
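/*
 * A client normally reaches the two callbacks below through the
 * dmaengine core rather than directly; an illustrative sequence
 * (error handling elided):
 *
 *	dmaengine_terminate_async(chan); // -> zynqmp_dma_device_terminate_all()
 *	dmaengine_synchronize(chan);	 // -> zynqmp_dma_synchronize()
 */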
/**
 * zynqmp_dma_synchronize - Synchronize the termination of transfers with the
 * current context
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_synchronize(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	tasklet_kill(&chan->tasklet);
}

/**
 * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
				struct dma_chan *dchan, dma_addr_t dma_dst,
				dma_addr_t dma_src, size_t len, ulong flags)
{
	struct zynqmp_dma_chan *chan;
	struct zynqmp_dma_desc_sw *new, *first = NULL;
	void *desc = NULL, *prev = NULL;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	chan = to_chan(dchan);

	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&chan->lock, irqflags);
	if (desc_cnt > chan->desc_free_cnt) {
		spin_unlock_irqrestore(&chan->lock, irqflags);
		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
		return NULL;
	}
	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
	spin_unlock_irqrestore(&chan->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = zynqmp_dma_get_descriptor(chan);

		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
					     dma_dst, copy, prev);
		prev = desc;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	zynqmp_dma_desc_config_eod(chan, desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = (enum dma_ctrl_flags)flags;
	return &first->async_tx;
}
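/*
 * Typical memcpy usage from a dmaengine client, as an illustrative
 * sketch (error handling elided; "chan" would come from
 * dma_request_chan() and done_cb is a hypothetical completion callback):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	tx->callback = done_cb;
 *	cookie = dmaengine_submit(tx);	// -> zynqmp_dma_tx_submit()
 *	dma_async_issue_pending(chan);	// -> zynqmp_dma_issue_pending()
 */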
/**
 * zynqmp_dma_chan_remove - Channel remove function
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
{
	if (!chan)
		return;

	if (chan->irq)
		devm_free_irq(chan->zdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}

/**
 * zynqmp_dma_chan_probe - Per Channel Probing
 * @zdev: Driver specific device structure
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
				 struct platform_device *pdev)
{
	struct zynqmp_dma_chan *chan;
	struct device_node *node = pdev->dev.of_node;
	const struct zynqmp_dma_config *match_data;
	int err;

	chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = zdev->dev;
	chan->zdev = zdev;

	chan->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chan->regs))
		return PTR_ERR(chan->regs);

	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
	chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
	chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
	if (err < 0) {
		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
		return err;
	}

	if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
	    chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
		dev_err(zdev->dev, "invalid bus-width value");
		return -EINVAL;
	}

	match_data = of_device_get_match_data(&pdev->dev);
	if (match_data)
		chan->irq_offset = match_data->offset;

	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
	zdev->chan = chan;
	tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->free_list);

	dma_cookie_init(&chan->common);
	chan->common.device = &zdev->common;
	list_add_tail(&chan->common.device_node, &zdev->common.channels);

	zynqmp_dma_init(chan);
	chan->irq = platform_get_irq(pdev, 0);
	if (chan->irq < 0)
		return -ENXIO;
	err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
			       "zynqmp-dma", chan);
	if (err)
		return err;

	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
	chan->idle = true;
	return 0;
}

/**
 * of_zynqmp_dma_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct zynqmp_dma_device *zdev = ofdma->of_dma_data;

	return dma_get_slave_channel(&zdev->chan->common);
}

/**
 * zynqmp_dma_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_suspend(dev);

	return 0;
}

/**
 * zynqmp_dma_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_resume(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_resume(dev);

	return 0;
}

/**
 * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
{
	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);

	clk_disable_unprepare(zdev->clk_main);
	clk_disable_unprepare(zdev->clk_apb);

	return 0;
}
/**
 * zynqmp_dma_runtime_resume - Runtime resume method for the driver
 * @dev: Address of the device structure
 *
 * Bring the device out of low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
{
	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(zdev->clk_main);
	if (err) {
		dev_err(dev, "Unable to enable main clock.\n");
		return err;
	}

	err = clk_prepare_enable(zdev->clk_apb);
	if (err) {
		dev_err(dev, "Unable to enable apb clock.\n");
		clk_disable_unprepare(zdev->clk_main);
		return err;
	}

	return 0;
}

static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
	SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
			   zynqmp_dma_runtime_resume, NULL)
};

/**
 * zynqmp_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int zynqmp_dma_probe(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev;
	struct dma_device *p;
	int ret;

	zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->dev = &pdev->dev;
	INIT_LIST_HEAD(&zdev->common.channels);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (ret) {
		dev_err(&pdev->dev, "DMA not available for address range\n");
		return ret;
	}
	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);

	p = &zdev->common;
	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
	p->device_terminate_all = zynqmp_dma_device_terminate_all;
	p->device_synchronize = zynqmp_dma_synchronize;
	p->device_issue_pending = zynqmp_dma_issue_pending;
	p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
	p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
	p->device_tx_status = dma_cookie_status;
	p->device_config = zynqmp_dma_device_config;
	p->dev = &pdev->dev;

	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
	if (IS_ERR(zdev->clk_main))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
				     "main clock not found.\n");

	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
	if (IS_ERR(zdev->clk_apb))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
				     "apb clock not found.\n");

	platform_set_drvdata(pdev, zdev);
	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
	pm_runtime_use_autosuspend(zdev->dev);
	pm_runtime_enable(zdev->dev);
	ret = pm_runtime_resume_and_get(zdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "device wakeup failed.\n");
		pm_runtime_disable(zdev->dev);
	}
	if (!pm_runtime_enabled(zdev->dev)) {
		ret = zynqmp_dma_runtime_resume(zdev->dev);
		if (ret)
			return ret;
	}

	ret = zynqmp_dma_chan_probe(zdev, pdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
		goto err_disable_pm;
	}

	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);

	ret = dma_async_device_register(&zdev->common);
	if (ret) {
		dev_err(zdev->dev, "failed to register the dma device\n");
		goto free_chan_resources;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_zynqmp_dma_xlate, zdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&zdev->common);
		goto free_chan_resources;
	}

	pm_runtime_mark_last_busy(zdev->dev);
	pm_runtime_put_sync_autosuspend(zdev->dev);

	return 0;

free_chan_resources:
	zynqmp_dma_chan_remove(zdev->chan);
err_disable_pm:
	if (!pm_runtime_enabled(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);
	pm_runtime_disable(zdev->dev);
	return ret;
}

/**
 * zynqmp_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void zynqmp_dma_remove(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&zdev->common);

	zynqmp_dma_chan_remove(zdev->chan);
	pm_runtime_disable(zdev->dev);
	if (!pm_runtime_enabled(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);
}
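/*
 * Example device tree node this driver binds against (illustrative
 * sketch only; the address, interrupt specifier and clock phandles are
 * placeholders, see the xlnx,zynqmp-dma-1.0 dt-binding for the
 * authoritative schema):
 *
 *	fpd_dma_chan1: dma-controller@fd500000 {
 *		compatible = "xlnx,zynqmp-dma-1.0";
 *		reg = <0x0 0xfd500000 0x0 0x1000>;
 *		interrupt-parent = <&gic>;
 *		interrupts = <0 117 4>;
 *		clocks = <&clk_main>, <&clk_apb>;
 *		clock-names = "clk_main", "clk_apb";
 *		xlnx,bus-width = <128>;
 *	};
 */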
static const struct of_device_id zynqmp_dma_of_match[] = {
	{ .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config },
	{ .compatible = "xlnx,zynqmp-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);

static struct platform_driver zynqmp_dma_driver = {
	.driver = {
		.name = "xilinx-zynqmp-dma",
		.of_match_table = zynqmp_dma_of_match,
		.pm = &zynqmp_dma_dev_pm_ops,
	},
	.probe = zynqmp_dma_probe,
	.remove_new = zynqmp_dma_remove,
};

module_platform_driver(zynqmp_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");