// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx ZynqMP DMA Engine
 *
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"

/*
 * Register Offsets
 *
 * Note: the interrupt registers (ISR/IMR/IER/IDS) are offset by
 * chan->irq_offset, which is non-zero only on Versal Gen 2 parts
 * (see IRQ_REG_OFFSET below). These macros therefore require a
 * local 'chan' variable to be in scope at the point of use.
 */
#define ZYNQMP_DMA_ISR			(chan->irq_offset + 0x100)
#define ZYNQMP_DMA_IMR			(chan->irq_offset + 0x104)
#define ZYNQMP_DMA_IER			(chan->irq_offset + 0x108)
#define ZYNQMP_DMA_IDS			(chan->irq_offset + 0x10c)
#define ZYNQMP_DMA_CTRL0		0x110
#define ZYNQMP_DMA_CTRL1		0x114
#define ZYNQMP_DMA_DATA_ATTR		0x120
#define ZYNQMP_DMA_DSCR_ATTR		0x124
#define ZYNQMP_DMA_SRC_DSCR_WRD0	0x128
#define ZYNQMP_DMA_SRC_DSCR_WRD1	0x12C
#define ZYNQMP_DMA_SRC_DSCR_WRD2	0x130
#define ZYNQMP_DMA_SRC_DSCR_WRD3	0x134
#define ZYNQMP_DMA_DST_DSCR_WRD0	0x138
#define ZYNQMP_DMA_DST_DSCR_WRD1	0x13C
#define ZYNQMP_DMA_DST_DSCR_WRD2	0x140
#define ZYNQMP_DMA_DST_DSCR_WRD3	0x144
#define ZYNQMP_DMA_SRC_START_LSB	0x158
#define ZYNQMP_DMA_SRC_START_MSB	0x15C
#define ZYNQMP_DMA_DST_START_LSB	0x160
#define ZYNQMP_DMA_DST_START_MSB	0x164
#define ZYNQMP_DMA_TOTAL_BYTE		0x188
#define ZYNQMP_DMA_RATE_CTRL		0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT		0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT		0x194
#define ZYNQMP_DMA_CTRL2		0x200

/* Interrupt registers bit field definitions */
#define ZYNQMP_DMA_DONE			BIT(10)
#define ZYNQMP_DMA_AXI_WR_DATA		BIT(9)
#define ZYNQMP_DMA_AXI_RD_DATA		BIT(8)
#define ZYNQMP_DMA_AXI_RD_DST_DSCR	BIT(7)
#define ZYNQMP_DMA_AXI_RD_SRC_DSCR	BIT(6)
#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR	BIT(5)
#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR	BIT(4)
#define ZYNQMP_DMA_BYTE_CNT_OVRFL	BIT(3)
#define ZYNQMP_DMA_DST_DSCR_DONE	BIT(2)
#define ZYNQMP_DMA_INV_APB		BIT(0)

/* Control 0 register bit field definitions */
#define ZYNQMP_DMA_OVR_FETCH		BIT(7)
#define ZYNQMP_DMA_POINT_TYPE_SG	BIT(6)
#define ZYNQMP_DMA_RATE_CTRL_EN		BIT(3)

/* Control 1 register bit field definitions */
#define ZYNQMP_DMA_SRC_ISSUE		GENMASK(4, 0)

/* Data Attribute register bit field definitions */
#define ZYNQMP_DMA_ARBURST		GENMASK(27, 26)
#define ZYNQMP_DMA_ARCACHE		GENMASK(25, 22)
#define ZYNQMP_DMA_ARCACHE_OFST		22
#define ZYNQMP_DMA_ARQOS		GENMASK(21, 18)
#define ZYNQMP_DMA_ARQOS_OFST		18
#define ZYNQMP_DMA_ARLEN		GENMASK(17, 14)
#define ZYNQMP_DMA_ARLEN_OFST		14
#define ZYNQMP_DMA_AWBURST		GENMASK(13, 12)
#define ZYNQMP_DMA_AWCACHE		GENMASK(11, 8)
#define ZYNQMP_DMA_AWCACHE_OFST		8
#define ZYNQMP_DMA_AWQOS		GENMASK(7, 4)
#define ZYNQMP_DMA_AWQOS_OFST		4
#define ZYNQMP_DMA_AWLEN		GENMASK(3, 0)
#define ZYNQMP_DMA_AWLEN_OFST		0

/* Descriptor Attribute register bit field definitions */
#define ZYNQMP_DMA_AXCOHRNT		BIT(8)
#define ZYNQMP_DMA_AXCACHE		GENMASK(7, 4)
#define ZYNQMP_DMA_AXCACHE_OFST		4
#define ZYNQMP_DMA_AXQOS		GENMASK(3, 0)
#define ZYNQMP_DMA_AXQOS_OFST		0

/* Control register 2 bit field definitions */
#define ZYNQMP_DMA_ENABLE		BIT(0)

/* Buffer Descriptor definitions */
#define ZYNQMP_DMA_DESC_CTRL_STOP	0x10
#define ZYNQMP_DMA_DESC_CTRL_COMP_INT	0x4
#define ZYNQMP_DMA_DESC_CTRL_SIZE_256	0x2
#define ZYNQMP_DMA_DESC_CTRL_COHRNT	0x1

/* Interrupt Mask specific definitions */
#define ZYNQMP_DMA_INT_ERR	(ZYNQMP_DMA_AXI_RD_DATA | \
				ZYNQMP_DMA_AXI_WR_DATA | \
				ZYNQMP_DMA_AXI_RD_DST_DSCR | \
				ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
				ZYNQMP_DMA_INV_APB)
#define ZYNQMP_DMA_INT_OVRFL	(ZYNQMP_DMA_BYTE_CNT_OVRFL | \
				ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
				ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
#define ZYNQMP_DMA_INT_DONE	(ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK	(ZYNQMP_DMA_INT_DONE | \
					ZYNQMP_DMA_INT_ERR | \
					ZYNQMP_DMA_INT_OVRFL | \
					ZYNQMP_DMA_DST_DSCR_DONE)

/* Max number of descriptors per channel */
#define ZYNQMP_DMA_NUM_DESCS	32

/* Max transfer size per descriptor */
#define ZYNQMP_DMA_MAX_TRANS_LEN	0x40000000

/* Max burst lengths */
#define ZYNQMP_DMA_MAX_DST_BURST_LEN    32768U
#define ZYNQMP_DMA_MAX_SRC_BURST_LEN    32768U

/* Reset values for data attributes */
#define ZYNQMP_DMA_AXCACHE_VAL		0xF

#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL	0x1F

#define ZYNQMP_DMA_IDS_DEFAULT_MASK	0xFFF

/* Bus width in bits */
#define ZYNQMP_DMA_BUS_WIDTH_64		64
#define ZYNQMP_DMA_BUS_WIDTH_128	128

/* Runtime PM autosuspend delay, in milliseconds */
#define ZDMA_PM_TIMEOUT			100

/* Size of one hardware (src or dst) descriptor for this channel */
#define ZYNQMP_DMA_DESC_SIZE(chan)	(chan->desc_size)

#define to_chan(chan)		container_of(chan, struct zynqmp_dma_chan, \
					     common)
#define tx_to_desc(tx)		container_of(tx, struct zynqmp_dma_desc_sw, \
					     async_tx)

/* IRQ Register offset for Versal Gen 2 */
#define IRQ_REG_OFFSET		0x308

/**
 * struct zynqmp_dma_desc_ll - Hw linked list descriptor
 * @addr: Buffer address
 * @size: Size of the buffer
 * @ctrl: Control word
 * @nxtdscraddr: Next descriptor base address
 * @rsvd: Reserved field and for Hw internal use.
 */
struct zynqmp_dma_desc_ll {
	u64 addr;
	u32 size;
	u32 ctrl;
	u64 nxtdscraddr;
	u64 rsvd;
};

/**
 * struct zynqmp_dma_desc_sw - Per Transaction structure
 * @src: Source address for simple mode dma
 * @dst: Destination address for simple mode dma
 * @len: Transfer length for simple mode dma
 * @node: Node in the channel descriptor list
 * @tx_list: List head for the current transfer
 * @async_tx: Async transaction descriptor
 * @src_v: Virtual address of the src descriptor
 * @src_p: Physical address of the src descriptor
 * @dst_v: Virtual address of the dst descriptor
 * @dst_p: Physical address of the dst descriptor
 */
struct zynqmp_dma_desc_sw {
	u64 src;
	u64 dst;
	u32 len;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	struct zynqmp_dma_desc_ll *src_v;
	dma_addr_t src_p;
	struct zynqmp_dma_desc_ll *dst_v;
	dma_addr_t dst_p;
};

/**
 * struct zynqmp_dma_chan - Driver specific DMA channel structure
 * @zdev: Driver specific device structure
 * @regs: Control registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @free_list: Descriptors free
 * @active_list: Descriptors active
 * @sw_desc_pool: SW descriptor pool
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool_v: Statically allocated descriptor base
 * @desc_pool_p: Physical allocated descriptor base
 * @desc_free_cnt: Descriptor available count
 * @dev: The dma device
 * @irq: Channel IRQ
 * @is_dmacoherent: Tells whether dma operations are coherent or not
 * @tasklet: Cleanup work after irq
 * @idle: Channel status
 * @desc_size: Size of the low level descriptor
 * @err: Channel has errors
 * @bus_width: Bus width
 * @src_burst_len: Source burst length
 * @dst_burst_len: Dest burst length
 *
 * @irq_offset: Irq register offset
 */
struct zynqmp_dma_chan {
	struct zynqmp_dma_device *zdev;
	void __iomem *regs;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct zynqmp_dma_desc_sw *sw_desc_pool;
	struct list_head done_list;
	struct dma_chan common;
	void *desc_pool_v;
	dma_addr_t desc_pool_p;
	u32 desc_free_cnt;
	struct device *dev;
	int irq;
	bool is_dmacoherent;
	struct tasklet_struct tasklet;
	bool idle;
	size_t desc_size;
	bool err;
	u32 bus_width;
	u32 src_burst_len;
	u32 dst_burst_len;
	u32 irq_offset;
};

/**
 * struct zynqmp_dma_device - DMA device structure
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @clk_main: Pointer to main clock
 * @clk_apb: Pointer to apb clock
 */
struct zynqmp_dma_device {
	struct device *dev;
	struct dma_device common;
	struct zynqmp_dma_chan *chan;
	struct clk *clk_main;
	struct clk *clk_apb;
};

/* Per-variant match data: extra offset applied to the interrupt registers */
struct zynqmp_dma_config {
	u32 offset;
};

static const struct zynqmp_dma_config versal2_dma_config = {
	.offset = IRQ_REG_OFFSET,
};

/* 64-bit register write done as two 32-bit accesses, low word first */
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
				     u64 value)
{
	lo_hi_writeq(value, chan->regs + reg);
}

/**
 * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
 * @chan: ZynqMP DMA DMA channel pointer
 * @desc: Transaction descriptor pointer
 */
static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
					    struct zynqmp_dma_desc_sw *desc)
{
	dma_addr_t addr;

	/* Program the start of the src and dst hw descriptor chains */
	addr = desc->src_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
	addr = desc->dst_p;
	zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
}

/**
 * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
 * @chan: ZynqMP DMA channel pointer
 * @desc: Hw descriptor pointer (src descriptor of an adjacent src/dst pair)
 */
static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
				       void *desc)
{
	struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;

	/* Stop on the src descriptor... */
	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
	hw++;
	/* ...and raise the completion interrupt on the paired dst descriptor */
	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
}

/**
 * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Hw descriptor pointer
 * @src: Source buffer address
 * @dst: Destination buffer address
 * @len: Transfer length
 * @prev: Previous hw descriptor pointer
 */
static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
				   struct zynqmp_dma_desc_ll *sdesc,
				   dma_addr_t src, dma_addr_t dst, size_t len,
				   struct zynqmp_dma_desc_ll *prev)
{
	/* The dst descriptor immediately follows its src descriptor */
	struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;

	sdesc->size = ddesc->size = len;
	sdesc->addr = src;
	ddesc->addr = dst;

	sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
	if (chan->is_dmacoherent) {
		sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
		ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
	}

	if (prev) {
		/* Chain the previous src/dst pair to this one */
		dma_addr_t addr = chan->desc_pool_p +
			    ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
		ddesc = prev + 1;
		prev->nxtdscraddr = addr;
		ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
	}
}

/**
 * zynqmp_dma_init - Initialize the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
{
	u32 val;

	/* Mask all interrupts, then ack anything already pending */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	val = readl(chan->regs + ZYNQMP_DMA_ISR);
	writel(val, chan->regs + ZYNQMP_DMA_ISR);

	if (chan->is_dmacoherent) {
		val = ZYNQMP_DMA_AXCOHRNT;
		val = (val & ~ZYNQMP_DMA_AXCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
	}

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	if (chan->is_dmacoherent) {
		val = (val & ~ZYNQMP_DMA_ARCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
		val = (val & ~ZYNQMP_DMA_AWCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
	}
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);

	/* Clearing the interrupt account registers (read clears the count) */
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	chan->idle = true;
}

/**
 * zynqmp_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
	struct zynqmp_dma_desc_sw *desc, *new;
	dma_cookie_t cookie;
	unsigned long irqflags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&chan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	if (!list_empty(&chan->pending_list)) {
		/*
		 * Chain the last hw descriptor of the last pending
		 * transaction to the new one and clear its STOP bit so
		 * the hardware continues into this transaction.
		 */
		desc = list_last_entry(&chan->pending_list,
				     struct zynqmp_dma_desc_sw, node);
		if (!list_empty(&desc->tx_list))
			desc = list_last_entry(&desc->tx_list,
					     struct zynqmp_dma_desc_sw, node);
		desc->src_v->nxtdscraddr = new->src_p;
		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
		desc->dst_v->nxtdscraddr = new->dst_p;
		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
	}

	list_add_tail(&new->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	return cookie;
}

/**
 * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
 * @chan: ZynqMP DMA channel pointer
 *
 * Return: The sw descriptor
 */
static struct zynqmp_dma_desc_sw *
zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
{
	struct
zynqmp_dma_desc_sw *desc; 421 unsigned long irqflags; 422 423 spin_lock_irqsave(&chan->lock, irqflags); 424 desc = list_first_entry(&chan->free_list, 425 struct zynqmp_dma_desc_sw, node); 426 list_del(&desc->node); 427 spin_unlock_irqrestore(&chan->lock, irqflags); 428 429 INIT_LIST_HEAD(&desc->tx_list); 430 /* Clear the src and dst descriptor memory */ 431 memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); 432 memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan)); 433 434 return desc; 435 } 436 437 /** 438 * zynqmp_dma_free_descriptor - Issue pending transactions 439 * @chan: ZynqMP DMA channel pointer 440 * @sdesc: Transaction descriptor pointer 441 */ 442 static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan, 443 struct zynqmp_dma_desc_sw *sdesc) 444 { 445 struct zynqmp_dma_desc_sw *child, *next; 446 447 chan->desc_free_cnt++; 448 list_move_tail(&sdesc->node, &chan->free_list); 449 list_for_each_entry_safe(child, next, &sdesc->tx_list, node) { 450 chan->desc_free_cnt++; 451 list_move_tail(&child->node, &chan->free_list); 452 } 453 } 454 455 /** 456 * zynqmp_dma_free_desc_list - Free descriptors list 457 * @chan: ZynqMP DMA channel pointer 458 * @list: List to parse and delete the descriptor 459 */ 460 static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan, 461 struct list_head *list) 462 { 463 struct zynqmp_dma_desc_sw *desc, *next; 464 465 list_for_each_entry_safe(desc, next, list, node) 466 zynqmp_dma_free_descriptor(chan, desc); 467 } 468 469 /** 470 * zynqmp_dma_alloc_chan_resources - Allocate channel resources 471 * @dchan: DMA channel 472 * 473 * Return: Number of descriptors on success and failure value on error 474 */ 475 static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) 476 { 477 struct zynqmp_dma_chan *chan = to_chan(dchan); 478 struct zynqmp_dma_desc_sw *desc; 479 int i, ret; 480 481 ret = pm_runtime_resume_and_get(chan->dev); 482 if (ret < 0) 483 return ret; 484 485 chan->sw_desc_pool = 
kzalloc_objs(*desc, ZYNQMP_DMA_NUM_DESCS); 486 if (!chan->sw_desc_pool) 487 return -ENOMEM; 488 489 chan->idle = true; 490 chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS; 491 492 INIT_LIST_HEAD(&chan->free_list); 493 494 for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { 495 desc = chan->sw_desc_pool + i; 496 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 497 desc->async_tx.tx_submit = zynqmp_dma_tx_submit; 498 list_add_tail(&desc->node, &chan->free_list); 499 } 500 501 chan->desc_pool_v = dma_alloc_coherent(chan->dev, 502 (2 * ZYNQMP_DMA_DESC_SIZE(chan) * 503 ZYNQMP_DMA_NUM_DESCS), 504 &chan->desc_pool_p, GFP_KERNEL); 505 if (!chan->desc_pool_v) 506 return -ENOMEM; 507 508 for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { 509 desc = chan->sw_desc_pool + i; 510 desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + 511 (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2)); 512 desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); 513 desc->src_p = chan->desc_pool_p + 514 (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); 515 desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); 516 } 517 518 return ZYNQMP_DMA_NUM_DESCS; 519 } 520 521 /** 522 * zynqmp_dma_start - Start DMA channel 523 * @chan: ZynqMP DMA channel pointer 524 */ 525 static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) 526 { 527 writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); 528 writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); 529 chan->idle = false; 530 writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); 531 } 532 533 /** 534 * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt 535 * @chan: ZynqMP DMA channel pointer 536 * @status: Interrupt status value 537 */ 538 static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) 539 { 540 if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL) 541 writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); 542 if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) 543 readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); 544 if (status & 
	    ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}

/**
 * zynqmp_dma_config - Apply the channel configuration to the hardware
 * @chan: ZynqMP DMA channel pointer
 *
 * Selects scatter-gather pointer mode and programs the AXI read/write
 * burst lengths (log2 encoded) from the channel's configured values.
 */
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
{
	u32 val, burst_val;

	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
	val |= ZYNQMP_DMA_POINT_TYPE_SG;
	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	burst_val = __ilog2_u32(chan->src_burst_len);
	val = (val & ~ZYNQMP_DMA_ARLEN) |
		((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
	burst_val = __ilog2_u32(chan->dst_burst_len);
	val = (val & ~ZYNQMP_DMA_AWLEN) |
		((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
}

/**
 * zynqmp_dma_device_config - Zynqmp dma device configuration
 * @dchan: DMA channel
 * @config: DMA device config
 *
 * Return: 0 always
 */
static int zynqmp_dma_device_config(struct dma_chan *dchan,
				    struct dma_slave_config *config)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	/* Clamp the requested bursts to the hardware limits */
	chan->src_burst_len = clamp(config->src_maxburst, 1U,
		ZYNQMP_DMA_MAX_SRC_BURST_LEN);
	chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
		ZYNQMP_DMA_MAX_DST_BURST_LEN);

	return 0;
}

/**
 * zynqmp_dma_start_transfer - Initiate the new transfer
 * @chan: ZynqMP DMA channel pointer
 *
 * Caller holds chan->lock (see issue_pending and the tasklet).
 */
static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	/* Hardware is still busy with a previous chain */
	if (!chan->idle)
		return;

	zynqmp_dma_config(chan);

	desc = list_first_entry_or_null(&chan->pending_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	zynqmp_dma_update_desc_to_ctrlr(chan, desc);
	zynqmp_dma_start(chan);
}

/**
 * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
 * @chan: ZynqMP DMA channel
 */
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			/* Drop the lock around the client callback */
			spin_unlock_irqrestore(&chan->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		zynqmp_dma_free_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: ZynqMP DMA channel pointer
 *
 * Caller holds chan->lock.
 */
static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	desc = list_first_entry_or_null(&chan->active_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);
}

/**
 * zynqmp_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_free_descriptors - Free channel descriptors
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_free_desc_list(chan, &chan->active_list);
	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
	zynqmp_dma_free_desc_list(chan, &chan->done_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);
}

/**
 * zynqmp_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	zynqmp_dma_free_descriptors(chan);
	dma_free_coherent(chan->dev,
		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
		chan->desc_pool_v, chan->desc_pool_p);
	kfree(chan->sw_desc_pool);
	/* Balance the PM reference taken in alloc_chan_resources */
	pm_runtime_put_autosuspend(chan->dev);
}

/**
 * zynqmp_dma_reset - Reset the channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	/* Disable all interrupts before tearing down the lists */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_complete_descriptor(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
	zynqmp_dma_chan_desc_cleanup(chan);
	zynqmp_dma_free_descriptors(chan);

	zynqmp_dma_init(chan);
}

/**
 * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 isr, imr, status;
	irqreturn_t ret = IRQ_NONE;

	/* Only consider sources that are not masked */
	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
	status = isr & ~imr;

	writel(isr, chan->regs + ZYNQMP_DMA_ISR);
	if (status & ZYNQMP_DMA_INT_DONE) {
		tasklet_schedule(&chan->tasklet);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_DONE)
		chan->idle = true;

	if (status & ZYNQMP_DMA_INT_ERR) {
		/* Error recovery is deferred to the tasklet (reset path) */
		chan->err = true;
		tasklet_schedule(&chan->tasklet);
		dev_err(chan->dev, "Channel %p has errors\n", chan);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_INT_OVRFL) {
		zynqmp_dma_handle_ovfl_int(chan, status);
		dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * zynqmp_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the ZynqMP DMA channel structure
 */
static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
{
	struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
	u32 count;
	unsigned long irqflags;

	if (chan->err) {
		zynqmp_dma_reset(chan);
		chan->err = false;
		return;
	}

	spin_lock_irqsave(&chan->lock, irqflags);
	/* Reading the dst account register returns the completion count */
	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	while (count) {
		zynqmp_dma_complete_descriptor(chan);
		count--;
	}
	spin_unlock_irqrestore(&chan->lock, irqflags);

	zynqmp_dma_chan_desc_cleanup(chan);

	if (chan->idle) {
		spin_lock_irqsave(&chan->lock, irqflags);
		zynqmp_dma_start_transfer(chan);
		spin_unlock_irqrestore(&chan->lock, irqflags);
	}
}

/**
 * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
 * @dchan: DMA channel pointer
 *
 * Return: Always '0'
 */
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	zynqmp_dma_free_descriptors(chan);

	return 0;
}

/**
 * zynqmp_dma_synchronize - Synchronizes the termination of transfers to the current context.
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_synchronize(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	/* Wait for any in-flight completion tasklet to finish */
	tasklet_kill(&chan->tasklet);
}

/**
 * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
				struct dma_chan *dchan, dma_addr_t dma_dst,
				dma_addr_t dma_src, size_t len, ulong flags)
{
	struct zynqmp_dma_chan *chan;
	struct zynqmp_dma_desc_sw *new, *first = NULL;
	void *desc = NULL, *prev = NULL;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	chan = to_chan(dchan);

	/* One descriptor per ZYNQMP_DMA_MAX_TRANS_LEN chunk */
	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&chan->lock, irqflags);
	if (desc_cnt > chan->desc_free_cnt) {
		spin_unlock_irqrestore(&chan->lock, irqflags);
		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
		return NULL;
	}
	/* Reserve the descriptors up front so get_descriptor cannot fail */
	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
	spin_unlock_irqrestore(&chan->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = zynqmp_dma_get_descriptor(chan);

		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
					     dma_dst, copy, prev);
		prev = desc;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	/* Terminate the chain on the last descriptor pair */
	zynqmp_dma_desc_config_eod(chan, desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = (enum dma_ctrl_flags)flags;
	return &first->async_tx;
}

/**
 * zynqmp_dma_chan_remove - Channel remove function
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
{
	if (!chan)
		return;

	if (chan->irq)
		devm_free_irq(chan->zdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}

/**
 * zynqmp_dma_chan_probe - Per Channel Probing
 * @zdev: Driver specific device structure
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
				 struct platform_device *pdev)
{
	struct zynqmp_dma_chan *chan;
	struct device_node *node = pdev->dev.of_node;
	const struct zynqmp_dma_config *match_data;
	int err;

	chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = zdev->dev;
	chan->zdev = zdev;

	chan->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chan->regs))
		return PTR_ERR(chan->regs);

	/* Defaults, overridden by device tree properties below */
	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
	chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
	chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
	if (err < 0) {
		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
		return err;
	}

	if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
	    chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
		dev_err(zdev->dev, "invalid bus-width value");
		return -EINVAL;
	}

	/* Versal Gen 2 variants shift the interrupt registers */
	match_data = of_device_get_match_data(&pdev->dev);
	if (match_data)
		chan->irq_offset = match_data->offset;

	chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
	zdev->chan = chan;
	tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->free_list);

	dma_cookie_init(&chan->common);
	chan->common.device = &zdev->common;
	list_add_tail(&chan->common.device_node, &zdev->common.channels);

	zynqmp_dma_init(chan);
	chan->irq = platform_get_irq(pdev, 0);
	if (chan->irq < 0)
		return -ENXIO;
	err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
			       "zynqmp-dma", chan);
	if (err)
		return err;

	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
	chan->idle = true;
	return 0;
}

/**
 * of_zynqmp_dma_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct zynqmp_dma_device *zdev = ofdma->of_dma_data;

	/* Single-channel controller: always hand out the one channel */
	return dma_get_slave_channel(&zdev->chan->common);
}

/**
 * zynqmp_dma_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return pm_runtime_force_suspend(dev);

	return 0;
}

/**
 * zynqmp_dma_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
1000 * Return: 0 on success and failure value on error 1001 */ 1002 static int __maybe_unused zynqmp_dma_resume(struct device *dev) 1003 { 1004 if (!device_may_wakeup(dev)) 1005 return pm_runtime_force_resume(dev); 1006 1007 return 0; 1008 } 1009 1010 /** 1011 * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver 1012 * @dev: Address of the device structure 1013 * 1014 * Put the driver into low power mode. 1015 * Return: 0 always 1016 */ 1017 static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev) 1018 { 1019 struct zynqmp_dma_device *zdev = dev_get_drvdata(dev); 1020 1021 clk_disable_unprepare(zdev->clk_main); 1022 clk_disable_unprepare(zdev->clk_apb); 1023 1024 return 0; 1025 } 1026 1027 /** 1028 * zynqmp_dma_runtime_resume - Runtime suspend method for the driver 1029 * @dev: Address of the device structure 1030 * 1031 * Put the driver into low power mode. 1032 * Return: 0 always 1033 */ 1034 static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev) 1035 { 1036 struct zynqmp_dma_device *zdev = dev_get_drvdata(dev); 1037 int err; 1038 1039 err = clk_prepare_enable(zdev->clk_main); 1040 if (err) { 1041 dev_err(dev, "Unable to enable main clock.\n"); 1042 return err; 1043 } 1044 1045 err = clk_prepare_enable(zdev->clk_apb); 1046 if (err) { 1047 dev_err(dev, "Unable to enable apb clock.\n"); 1048 clk_disable_unprepare(zdev->clk_main); 1049 return err; 1050 } 1051 1052 return 0; 1053 } 1054 1055 static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = { 1056 SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume) 1057 SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend, 1058 zynqmp_dma_runtime_resume, NULL) 1059 }; 1060 1061 /** 1062 * zynqmp_dma_probe - Driver probe function 1063 * @pdev: Pointer to the platform_device structure 1064 * 1065 * Return: '0' on success and failure value on error 1066 */ 1067 static int zynqmp_dma_probe(struct platform_device *pdev) 1068 { 1069 struct zynqmp_dma_device *zdev; 1070 struct 
dma_device *p;
	int ret;

	zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->dev = &pdev->dev;
	INIT_LIST_HEAD(&zdev->common.channels);

	/* 44-bit DMA addressing, per the mask requested here. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (ret) {
		dev_err(&pdev->dev, "DMA not available for address range\n");
		return ret;
	}
	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);

	/* Wire up the dmaengine provider callbacks. */
	p = &zdev->common;
	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
	p->device_terminate_all = zynqmp_dma_device_terminate_all;
	p->device_synchronize = zynqmp_dma_synchronize;
	p->device_issue_pending = zynqmp_dma_issue_pending;
	p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
	p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
	p->device_tx_status = dma_cookie_status;
	p->device_config = zynqmp_dma_device_config;
	p->dev = &pdev->dev;

	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
	if (IS_ERR(zdev->clk_main))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
				     "main clock not found.\n");

	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
	if (IS_ERR(zdev->clk_apb))
		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
				     "apb clock not found.\n");

	platform_set_drvdata(pdev, zdev);
	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
	pm_runtime_use_autosuspend(zdev->dev);
	pm_runtime_enable(zdev->dev);
	ret = pm_runtime_resume_and_get(zdev->dev);
	/* NOTE(review): on failure only runtime PM is disabled and probe
	 * continues (no return/goto here); subsequent register accesses rely
	 * on the !pm_runtime_enabled() fallback below — confirm intended.
	 */
	if (ret < 0) {
		dev_err(&pdev->dev, "device wakeup failed.\n");
		pm_runtime_disable(zdev->dev);
	}
	/* Without runtime PM the clocks must be enabled manually. */
	if (!pm_runtime_enabled(zdev->dev)) {
		ret = zynqmp_dma_runtime_resume(zdev->dev);
		if (ret)
			return ret;
	}

	ret = zynqmp_dma_chan_probe(zdev, pdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
		goto err_disable_pm;
	}

	/* Bus width is known only after the channel probe has read it. */
	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);

	ret = dma_async_device_register(&zdev->common);
	if (ret) {
		dev_err(zdev->dev, "failed to register the dma device\n");
		goto free_chan_resources;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_zynqmp_dma_xlate, zdev);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&zdev->common);
		goto free_chan_resources;
	}

	/* Drop the probe-time reference; autosuspend may now power down. */
	pm_runtime_put_sync_autosuspend(zdev->dev);

	return 0;

free_chan_resources:
	zynqmp_dma_chan_remove(zdev->chan);
err_disable_pm:
	/* If runtime PM never took over, undo the manual clock enable. */
	if (!pm_runtime_enabled(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);
	pm_runtime_disable(zdev->dev);
	return ret;
}

/**
 * zynqmp_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Unregister from the DT and dmaengine core, tear down the channel and
 * power the controller down.
 */
static void zynqmp_dma_remove(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&zdev->common);

	zynqmp_dma_chan_remove(zdev->chan);
	/* Gate the clocks ourselves if the device is still powered. */
	if (pm_runtime_active(zdev->dev))
		zynqmp_dma_runtime_suspend(zdev->dev);
	pm_runtime_disable(zdev->dev);
}

static const struct of_device_id zynqmp_dma_of_match[] = {
	{ .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config },
	{ .compatible = "xlnx,zynqmp-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);

static struct platform_driver zynqmp_dma_driver = {
	.driver = {
		.name = "xilinx-zynqmp-dma",
		.of_match_table = zynqmp_dma_of_match,
		.pm = &zynqmp_dma_dev_pm_ops,
	},
	.probe = zynqmp_dma_probe,
	.remove = zynqmp_dma_remove,
	/* Reuse remove for shutdown to quiesce DMA before reboot/poweroff. */
	.shutdown =
zynqmp_dma_remove,
};

module_platform_driver(zynqmp_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");