1 /* 2 * DMA driver for Xilinx Video DMA Engine 3 * 4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. 5 * 6 * Based on the Freescale DMA driver. 7 * 8 * Description: 9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP 10 * core that provides high-bandwidth direct memory access between memory 11 * and AXI4-Stream type video target peripherals. The core provides efficient 12 * two dimensional DMA operations with independent asynchronous read (S2MM) 13 * and write (MM2S) channel operation. It can be configured to have either 14 * one channel or two channels. If configured as two channels, one is to 15 * transmit to the video device (MM2S) and another is to receive from the 16 * video device (S2MM). Initialization, status, interrupt and management 17 * registers are accessed through an AXI4-Lite slave interface. 18 * 19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that 20 * provides high-bandwidth one dimensional direct memory access between memory 21 * and AXI4-Stream target peripherals. It supports one receive and one 22 * transmit channel, both of them optional at synthesis time. 23 * 24 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory 25 * Access (DMA) between a memory-mapped source address and a memory-mapped 26 * destination address. 27 * 28 * This program is free software: you can redistribute it and/or modify 29 * it under the terms of the GNU General Public License as published by 30 * the Free Software Foundation, either version 2 of the License, or 31 * (at your option) any later version. 32 */ 33 34 #include <linux/bitops.h> 35 #include <linux/dmapool.h> 36 #include <linux/dma/xilinx_dma.h> 37 #include <linux/init.h> 38 #include <linux/interrupt.h> 39 #include <linux/io.h> 40 #include <linux/iopoll.h> 41 #include <linux/module.h> 42 #include <linux/of_address.h> 43 #include <linux/of_dma.h> 44 #include <linux/of_platform.h> 45 #include <linux/of_irq.h> 46 #include <linux/slab.h> 47 #include <linux/clk.h> 48 #include <linux/io-64-nonatomic-lo-hi.h> 49 50 #include "../dmaengine.h" 51 52 /* Register/Descriptor Offsets */ 53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 57 58 /* Control Registers */ 59 #define XILINX_DMA_REG_DMACR 0x0000 60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff 61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24 62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff 63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) 65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) 66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) 67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8 68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) 70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) 71 #define XILINX_DMA_DMACR_RESET BIT(2) 72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1) 73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0) 74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) 75 76 #define XILINX_DMA_REG_DMASR 0x0004 77 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) 78 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) 79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) 80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) 81 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) 82 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) 83 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) 84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) 85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) 86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) 87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) 88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) 89 #define XILINX_DMA_DMASR_IDLE BIT(1) 90 #define XILINX_DMA_DMASR_HALTED BIT(0) 91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) 92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 93 94 #define XILINX_DMA_REG_CURDESC 0x0008 95 #define XILINX_DMA_REG_TAILDESC 0x0010 96 #define XILINX_DMA_REG_REG_INDEX 0x0014 97 #define XILINX_DMA_REG_FRMSTORE 0x0018 98 #define XILINX_DMA_REG_THRESHOLD 0x001c 99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024 100 #define XILINX_DMA_REG_PARK_PTR 0x0028 101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 102 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 103 #define XILINX_DMA_REG_VDMA_VERSION 0x002c 104 105 /* Register Direct Mode Registers */ 106 #define XILINX_DMA_REG_VSIZE 0x0000 107 #define XILINX_DMA_REG_HSIZE 0x0004 108 109 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 110 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 111 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 112 113 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 114 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 115 116 /* HW specific definitions */ 117 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 118 119 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 120 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 121 XILINX_DMA_DMASR_DLY_CNT_IRQ | \ 122 XILINX_DMA_DMASR_ERR_IRQ) 123 124 #define XILINX_DMA_DMASR_ALL_ERR_MASK \ 125 (XILINX_DMA_DMASR_EOL_LATE_ERR | \ 126 XILINX_DMA_DMASR_SOF_LATE_ERR | \ 127 XILINX_DMA_DMASR_SG_DEC_ERR | \ 128 XILINX_DMA_DMASR_SG_SLV_ERR | \ 129 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 130 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 131 XILINX_DMA_DMASR_DMA_DEC_ERR | \ 132 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ 133 XILINX_DMA_DMASR_DMA_INT_ERR) 134 135 /* 136 * Recoverable errors are DMA Internal error, SOF Early, EOF Early 137 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 138 * is enabled in the h/w system. 139 */ 140 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ 141 (XILINX_DMA_DMASR_SOF_LATE_ERR | \ 142 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 143 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 144 XILINX_DMA_DMASR_DMA_INT_ERR) 145 146 /* Axi VDMA Flush on Fsync bits */ 147 #define XILINX_DMA_FLUSH_S2MM 3 148 #define XILINX_DMA_FLUSH_MM2S 2 149 #define XILINX_DMA_FLUSH_BOTH 1 150 151 /* Delay loop counter to prevent hardware failure */ 152 #define XILINX_DMA_LOOP_COUNT 1000000 153 154 /* AXI DMA Specific Registers/Offsets */ 155 #define XILINX_DMA_REG_SRCDSTADDR 0x18 156 #define XILINX_DMA_REG_BTT 0x28 157 158 /* AXI DMA Specific Masks/Bit fields */ 159 #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) 160 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 161 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) 162 #define XILINX_DMA_CR_COALESCE_SHIFT 16 163 #define XILINX_DMA_BD_SOP BIT(27) 164 #define XILINX_DMA_BD_EOP BIT(26) 165 #define XILINX_DMA_COALESCE_MAX 255 166 #define XILINX_DMA_NUM_APP_WORDS 5 167 168 /* Multi-Channel DMA Descriptor offsets*/ 169 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) 170 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) 171 172 /* Multi-Channel DMA Masks/Shifts */ 173 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) 174 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) 175 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) 176 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) 177 #define XILINX_DMA_BD_STRIDE_SHIFT 0 178 #define XILINX_DMA_BD_VSIZE_SHIFT 19 179 180 /* AXI CDMA Specific Registers/Offsets */ 181 #define XILINX_CDMA_REG_SRCADDR 0x18 182 #define XILINX_CDMA_REG_DSTADDR 0x20 183 184 /* AXI CDMA Specific Masks */ 185 #define XILINX_CDMA_CR_SGMODE BIT(3) 186 187 /** 188 * struct xilinx_vdma_desc_hw - Hardware Descriptor 189 * @next_desc: Next Descriptor Pointer @0x00 190 * @pad1: Reserved @0x04 191 * @buf_addr: Buffer address @0x08 192 * @buf_addr_msb: MSB of Buffer address @0x0C 193 * @vsize: Vertical Size @0x10 194 * @hsize: Horizontal Size @0x14 195 * @stride: Number of bytes between the first 196 * pixels of each horizontal line @0x18 197 */ 198 struct xilinx_vdma_desc_hw { 199 u32 next_desc; 200 u32 pad1; 201 u32 buf_addr; 202 u32 buf_addr_msb; 203 u32 vsize; 204 u32 hsize; 205 u32 stride; 206 } __aligned(64); 207 208 /** 209 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 210 * @next_desc: Next Descriptor Pointer @0x00 211 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 212 * @buf_addr: Buffer address @0x08 213 * @buf_addr_msb: MSB of Buffer address @0x0C 214 * @pad1: Reserved @0x10 215 * @pad2: Reserved @0x14 216 * @control: Control field @0x18 217 * @status: Status field @0x1C 218 * @app: APP Fields @0x20 - 0x30 219 */ 220 struct xilinx_axidma_desc_hw { 221 u32 next_desc; 222 u32 next_desc_msb; 223 u32 buf_addr; 224 u32 buf_addr_msb; 225 u32 mcdma_control; 226 u32 vsize_stride; 227 u32 control; 228 u32 status; 229 u32 app[XILINX_DMA_NUM_APP_WORDS]; 230 } __aligned(64); 231 232 /** 233 * struct xilinx_cdma_desc_hw - Hardware Descriptor 234 * @next_desc: Next Descriptor Pointer @0x00 235 * @next_descmsb: Next Descriptor Pointer MSB @0x04 236 * @src_addr: Source address @0x08 237 * @src_addrmsb: Source address MSB @0x0C 238 * @dest_addr: Destination address @0x10 239 * @dest_addrmsb: Destination address MSB @0x14 240 * @control: Control field @0x18 241 * @status: Status field @0x1C 242 */ 243 struct xilinx_cdma_desc_hw { 244 u32 next_desc; 245 u32 next_desc_msb; 246 u32 src_addr; 247 u32 src_addr_msb; 248 u32 dest_addr; 249 u32 dest_addr_msb; 250 u32 control; 251 u32 status; 252 } __aligned(64); 253 254 /** 255 * struct xilinx_vdma_tx_segment - Descriptor segment 256 * @hw: Hardware descriptor 257 * @node: Node in the descriptor segments list 258 * @phys: Physical address of segment 259 */ 260 struct xilinx_vdma_tx_segment { 261 struct xilinx_vdma_desc_hw hw; 262 struct list_head node; 263 dma_addr_t phys; 264 } __aligned(64); 265 266 /** 267 * struct xilinx_axidma_tx_segment - Descriptor segment 268 * @hw: Hardware descriptor 269 * @node: Node in the descriptor segments list 270 * @phys: Physical address of segment 271 */ 272 struct xilinx_axidma_tx_segment { 273 struct xilinx_axidma_desc_hw hw; 274 struct list_head node; 275 dma_addr_t phys; 276 } __aligned(64); 277 278 /** 279 * struct xilinx_cdma_tx_segment - Descriptor segment 280 * @hw: Hardware descriptor 281 * @node: Node in the descriptor segments list 282 * @phys: Physical address of segment 283 */ 284 struct xilinx_cdma_tx_segment { 285 struct xilinx_cdma_desc_hw hw; 286 struct list_head node; 287 dma_addr_t phys; 288 } __aligned(64); 289 290 /** 291 * struct xilinx_dma_tx_descriptor - Per Transaction structure 292 * @async_tx: Async transaction descriptor 293 * @segments: TX segments list 294 * @node: Node in the channel descriptors list 295 * @cyclic: Check for cyclic transfers. 296 */ 297 struct xilinx_dma_tx_descriptor { 298 struct dma_async_tx_descriptor async_tx; 299 struct list_head segments; 300 struct list_head node; 301 bool cyclic; 302 }; 303 304 /** 305 * struct xilinx_dma_chan - Driver specific DMA channel structure 306 * @xdev: Driver specific device structure 307 * @ctrl_offset: Control registers offset 308 * @desc_offset: TX descriptor registers offset 309 * @lock: Descriptor operation lock 310 * @pending_list: Descriptors waiting 311 * @active_list: Descriptors ready to submit 312 * @done_list: Complete descriptors 313 * @common: DMA common channel 314 * @desc_pool: Descriptors pool 315 * @dev: The dma device 316 * @irq: Channel IRQ 317 * @id: Channel ID 318 * @direction: Transfer direction 319 * @num_frms: Number of frames 320 * @has_sg: Support scatter transfers 321 * @cyclic: Check for cyclic transfers. 322 * @genlock: Support genlock mode 323 * @err: Channel has errors 324 * @tasklet: Cleanup work after irq 325 * @config: Device configuration info 326 * @flush_on_fsync: Flush on Frame sync 327 * @desc_pendingcount: Descriptor pending count 328 * @ext_addr: Indicates 64 bit addressing is supported by dma channel 329 * @desc_submitcount: Descriptor h/w submitted count 330 * @residue: Residue for AXI DMA 331 * @seg_v: Statically allocated segments base 332 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 333 * @start_transfer: Differentiate b/w DMA IP's transfer 334 * @stop_transfer: Differentiate b/w DMA IP's quiesce 335 */ 336 struct xilinx_dma_chan { 337 struct xilinx_dma_device *xdev; 338 u32 ctrl_offset; 339 u32 desc_offset; 340 spinlock_t lock; 341 struct list_head pending_list; 342 struct list_head active_list; 343 struct list_head done_list; 344 struct dma_chan common; 345 struct dma_pool *desc_pool; 346 struct device *dev; 347 int irq; 348 int id; 349 enum dma_transfer_direction direction; 350 int num_frms; 351 bool has_sg; 352 bool cyclic; 353 bool genlock; 354 bool err; 355 struct tasklet_struct tasklet; 356 struct xilinx_vdma_config config; 357 bool flush_on_fsync; 358 u32 desc_pendingcount; 359 bool ext_addr; 360 u32 desc_submitcount; 361 u32 residue; 362 struct xilinx_axidma_tx_segment *seg_v; 363 struct xilinx_axidma_tx_segment *cyclic_seg_v; 364 void (*start_transfer)(struct xilinx_dma_chan *chan); 365 int (*stop_transfer)(struct xilinx_dma_chan *chan); 366 u16 tdest; 367 }; 368 369 struct xilinx_dma_config { 370 enum xdma_ip_type dmatype; 371 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, 372 struct clk **tx_clk, struct clk **txs_clk, 373 struct clk **rx_clk, struct clk **rxs_clk); 374 }; 375 376 /** 377 * struct xilinx_dma_device - DMA device structure 378 * @regs: I/O mapped base address 379 * @dev: Device Structure 380 * @common: DMA device structure 381 * @chan: Driver specific DMA channel 382 * @has_sg: Specifies whether Scatter-Gather is present or not 383 * @mcdma: Specifies whether Multi-Channel is present or not 384 * @flush_on_fsync: Flush on frame sync 385 * @ext_addr: Indicates 64 bit addressing is supported by dma device 386 * @pdev: Platform device structure pointer 387 * @dma_config: DMA config structure 388 * @axi_clk: DMA Axi4-lite interace clock 389 * @tx_clk: DMA mm2s clock 390 * @txs_clk: DMA mm2s stream clock 391 * @rx_clk: DMA s2mm clock 392 * @rxs_clk: DMA s2mm stream clock 393 * @nr_channels: Number of channels DMA device supports 394 * @chan_id: DMA channel identifier 395 */ 396 struct xilinx_dma_device { 397 void __iomem *regs; 398 struct device *dev; 399 struct dma_device common; 400 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; 401 bool has_sg; 402 bool mcdma; 403 u32 flush_on_fsync; 404 bool ext_addr; 405 struct platform_device *pdev; 406 const struct xilinx_dma_config *dma_config; 407 struct clk *axi_clk; 408 struct clk *tx_clk; 409 struct clk *txs_clk; 410 struct clk *rx_clk; 411 struct clk *rxs_clk; 412 u32 nr_channels; 413 u32 chan_id; 414 }; 415 416 /* Macros */ 417 #define to_xilinx_chan(chan) \ 418 container_of(chan, struct xilinx_dma_chan, common) 419 #define to_dma_tx_descriptor(tx) \ 420 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) 421 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ 422 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ 423 cond, delay_us, timeout_us) 424 425 /* IO accessors */ 426 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) 427 { 428 return ioread32(chan->xdev->regs + reg); 429 } 430 431 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) 432 { 433 iowrite32(value, chan->xdev->regs + reg); 434 } 435 436 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, 437 u32 value) 438 { 439 dma_write(chan, chan->desc_offset + reg, value); 440 } 441 442 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) 443 { 444 return dma_read(chan, chan->ctrl_offset + reg); 445 } 446 447 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, 448 u32 value) 449 { 450 dma_write(chan, chan->ctrl_offset + reg, value); 451 } 452 453 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, 454 u32 clr) 455 { 456 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); 457 } 458 459 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, 460 u32 set) 461 { 462 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); 463 } 464 465 /** 466 * vdma_desc_write_64 - 64-bit descriptor write 467 * @chan: Driver specific VDMA channel 468 * @reg: Register to write 469 * @value_lsb: lower address of the descriptor. 470 * @value_msb: upper address of the descriptor. 471 * 472 * Since vdma driver is trying to write to a register offset which is not a 473 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits 474 * instead of a single 64 bit register write. 475 */ 476 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, 477 u32 value_lsb, u32 value_msb) 478 { 479 /* Write the lsb 32 bits*/ 480 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); 481 482 /* Write the msb 32 bits */ 483 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 484 } 485 486 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) 487 { 488 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); 489 } 490 491 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, 492 dma_addr_t addr) 493 { 494 if (chan->ext_addr) 495 dma_writeq(chan, reg, addr); 496 else 497 dma_ctrl_write(chan, reg, addr); 498 } 499 500 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, 501 struct xilinx_axidma_desc_hw *hw, 502 dma_addr_t buf_addr, size_t sg_used, 503 size_t period_len) 504 { 505 if (chan->ext_addr) { 506 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); 507 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + 508 period_len); 509 } else { 510 hw->buf_addr = buf_addr + sg_used + period_len; 511 } 512 } 513 514 /* ----------------------------------------------------------------------------- 515 * Descriptors and segments alloc and free 516 */ 517 518 /** 519 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 520 * @chan: Driver specific DMA channel 521 * 522 * Return: The allocated segment on success and NULL on failure. 523 */ 524 static struct xilinx_vdma_tx_segment * 525 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 526 { 527 struct xilinx_vdma_tx_segment *segment; 528 dma_addr_t phys; 529 530 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 531 if (!segment) 532 return NULL; 533 534 segment->phys = phys; 535 536 return segment; 537 } 538 539 /** 540 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment 541 * @chan: Driver specific DMA channel 542 * 543 * Return: The allocated segment on success and NULL on failure. 544 */ 545 static struct xilinx_cdma_tx_segment * 546 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 547 { 548 struct xilinx_cdma_tx_segment *segment; 549 dma_addr_t phys; 550 551 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 552 if (!segment) 553 return NULL; 554 555 segment->phys = phys; 556 557 return segment; 558 } 559 560 /** 561 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment 562 * @chan: Driver specific DMA channel 563 * 564 * Return: The allocated segment on success and NULL on failure. 565 */ 566 static struct xilinx_axidma_tx_segment * 567 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 568 { 569 struct xilinx_axidma_tx_segment *segment; 570 dma_addr_t phys; 571 572 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 573 if (!segment) 574 return NULL; 575 576 segment->phys = phys; 577 578 return segment; 579 } 580 581 /** 582 * xilinx_dma_free_tx_segment - Free transaction segment 583 * @chan: Driver specific DMA channel 584 * @segment: DMA transaction segment 585 */ 586 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 587 struct xilinx_axidma_tx_segment *segment) 588 { 589 dma_pool_free(chan->desc_pool, segment, segment->phys); 590 } 591 592 /** 593 * xilinx_cdma_free_tx_segment - Free transaction segment 594 * @chan: Driver specific DMA channel 595 * @segment: DMA transaction segment 596 */ 597 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, 598 struct xilinx_cdma_tx_segment *segment) 599 { 600 dma_pool_free(chan->desc_pool, segment, segment->phys); 601 } 602 603 /** 604 * xilinx_vdma_free_tx_segment - Free transaction segment 605 * @chan: Driver specific DMA channel 606 * @segment: DMA transaction segment 607 */ 608 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, 609 struct xilinx_vdma_tx_segment *segment) 610 { 611 dma_pool_free(chan->desc_pool, segment, segment->phys); 612 } 613 614 /** 615 * xilinx_dma_tx_descriptor - Allocate transaction descriptor 616 * @chan: Driver specific DMA channel 617 * 618 * Return: The allocated descriptor on success and NULL on failure. 619 */ 620 static struct xilinx_dma_tx_descriptor * 621 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 622 { 623 struct xilinx_dma_tx_descriptor *desc; 624 625 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 626 if (!desc) 627 return NULL; 628 629 INIT_LIST_HEAD(&desc->segments); 630 631 return desc; 632 } 633 634 /** 635 * xilinx_dma_free_tx_descriptor - Free transaction descriptor 636 * @chan: Driver specific DMA channel 637 * @desc: DMA transaction descriptor 638 */ 639 static void 640 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 641 struct xilinx_dma_tx_descriptor *desc) 642 { 643 struct xilinx_vdma_tx_segment *segment, *next; 644 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 645 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 646 647 if (!desc) 648 return; 649 650 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 651 list_for_each_entry_safe(segment, next, &desc->segments, node) { 652 list_del(&segment->node); 653 xilinx_vdma_free_tx_segment(chan, segment); 654 } 655 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 656 list_for_each_entry_safe(cdma_segment, cdma_next, 657 &desc->segments, node) { 658 list_del(&cdma_segment->node); 659 xilinx_cdma_free_tx_segment(chan, cdma_segment); 660 } 661 } else { 662 list_for_each_entry_safe(axidma_segment, axidma_next, 663 &desc->segments, node) { 664 list_del(&axidma_segment->node); 665 xilinx_dma_free_tx_segment(chan, axidma_segment); 666 } 667 } 668 669 kfree(desc); 670 } 671 672 /* Required functions */ 673 674 /** 675 * xilinx_dma_free_desc_list - Free descriptors list 676 * @chan: Driver specific DMA channel 677 * @list: List to parse and delete the descriptor 678 */ 679 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 680 struct list_head *list) 681 { 682 struct xilinx_dma_tx_descriptor *desc, *next; 683 684 list_for_each_entry_safe(desc, next, list, node) { 685 list_del(&desc->node); 686 xilinx_dma_free_tx_descriptor(chan, desc); 687 } 688 } 689 690 /** 691 * xilinx_dma_free_descriptors - Free channel descriptors 692 * @chan: Driver specific DMA channel 693 */ 694 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 695 { 696 unsigned long flags; 697 698 spin_lock_irqsave(&chan->lock, flags); 699 700 xilinx_dma_free_desc_list(chan, &chan->pending_list); 701 xilinx_dma_free_desc_list(chan, &chan->done_list); 702 xilinx_dma_free_desc_list(chan, &chan->active_list); 703 704 spin_unlock_irqrestore(&chan->lock, flags); 705 } 706 707 /** 708 * xilinx_dma_free_chan_resources - Free channel resources 709 * @dchan: DMA channel 710 */ 711 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 712 { 713 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 714 715 dev_dbg(chan->dev, "Free all channel resources.\n"); 716 717 xilinx_dma_free_descriptors(chan); 718 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 719 xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); 720 xilinx_dma_free_tx_segment(chan, chan->seg_v); 721 } 722 dma_pool_destroy(chan->desc_pool); 723 chan->desc_pool = NULL; 724 } 725 726 /** 727 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback 728 * @chan: Driver specific dma channel 729 * @desc: dma transaction descriptor 730 * @flags: flags for spin lock 731 */ 732 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 733 struct xilinx_dma_tx_descriptor *desc, 734 unsigned long *flags) 735 { 736 dma_async_tx_callback callback; 737 void *callback_param; 738 739 callback = desc->async_tx.callback; 740 callback_param = desc->async_tx.callback_param; 741 if (callback) { 742 spin_unlock_irqrestore(&chan->lock, *flags); 743 callback(callback_param); 744 spin_lock_irqsave(&chan->lock, *flags); 745 } 746 } 747 748 /** 749 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 750 * @chan: Driver specific DMA channel 751 */ 752 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 753 { 754 struct xilinx_dma_tx_descriptor *desc, *next; 755 unsigned long flags; 756 757 spin_lock_irqsave(&chan->lock, flags); 758 759 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 760 struct dmaengine_desc_callback cb; 761 762 if (desc->cyclic) { 763 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 764 break; 765 } 766 767 /* Remove from the list of running transactions */ 768 list_del(&desc->node); 769 770 /* Run the link descriptor callback function */ 771 dmaengine_desc_get_callback(&desc->async_tx, &cb); 772 if (dmaengine_desc_callback_valid(&cb)) { 773 spin_unlock_irqrestore(&chan->lock, flags); 774 dmaengine_desc_callback_invoke(&cb, NULL); 775 spin_lock_irqsave(&chan->lock, flags); 776 } 777 778 /* Run any dependencies, then free the descriptor */ 779 dma_run_dependencies(&desc->async_tx); 780 xilinx_dma_free_tx_descriptor(chan, desc); 781 } 782 783 spin_unlock_irqrestore(&chan->lock, flags); 784 } 785 786 /** 787 * xilinx_dma_do_tasklet - Schedule completion tasklet 788 * @data: Pointer to the Xilinx DMA channel structure 789 */ 790 static void xilinx_dma_do_tasklet(unsigned long data) 791 { 792 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; 793 794 xilinx_dma_chan_desc_cleanup(chan); 795 } 796 797 /** 798 * xilinx_dma_alloc_chan_resources - Allocate channel resources 799 * @dchan: DMA channel 800 * 801 * Return: '0' on success and failure value on error 802 */ 803 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 804 { 805 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 806 807 /* Has this channel already been allocated? */ 808 if (chan->desc_pool) 809 return 0; 810 811 /* 812 * We need the descriptor to be aligned to 64bytes 813 * for meeting Xilinx VDMA specification requirement. 814 */ 815 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 816 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", 817 chan->dev, 818 sizeof(struct xilinx_axidma_tx_segment), 819 __alignof__(struct xilinx_axidma_tx_segment), 820 0); 821 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 822 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 823 chan->dev, 824 sizeof(struct xilinx_cdma_tx_segment), 825 __alignof__(struct xilinx_cdma_tx_segment), 826 0); 827 } else { 828 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 829 chan->dev, 830 sizeof(struct xilinx_vdma_tx_segment), 831 __alignof__(struct xilinx_vdma_tx_segment), 832 0); 833 } 834 835 if (!chan->desc_pool) { 836 dev_err(chan->dev, 837 "unable to allocate channel %d descriptor pool\n", 838 chan->id); 839 return -ENOMEM; 840 } 841 842 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 843 /* 844 * For AXI DMA case after submitting a pending_list, keep 845 * an extra segment allocated so that the "next descriptor" 846 * pointer on the tail descriptor always points to a 847 * valid descriptor, even when paused after reaching taildesc. 848 * This way, it is possible to issue additional 849 * transfers without halting and restarting the channel. 850 */ 851 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); 852 853 /* 854 * For cyclic DMA mode we need to program the tail Descriptor 855 * register with a value which is not a part of the BD chain 856 * so allocating a desc segment during channel allocation for 857 * programming tail descriptor. 858 */ 859 chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); 860 } 861 862 dma_cookie_init(dchan); 863 864 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 865 /* For AXI DMA resetting once channel will reset the 866 * other channel as well so enable the interrupts here. 867 */ 868 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 869 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 870 } 871 872 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 873 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 874 XILINX_CDMA_CR_SGMODE); 875 876 return 0; 877 } 878 879 /** 880 * xilinx_dma_tx_status - Get DMA transaction status 881 * @dchan: DMA channel 882 * @cookie: Transaction identifier 883 * @txstate: Transaction state 884 * 885 * Return: DMA transaction status 886 */ 887 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 888 dma_cookie_t cookie, 889 struct dma_tx_state *txstate) 890 { 891 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 892 struct xilinx_dma_tx_descriptor *desc; 893 struct xilinx_axidma_tx_segment *segment; 894 struct xilinx_axidma_desc_hw *hw; 895 enum dma_status ret; 896 unsigned long flags; 897 u32 residue = 0; 898 899 ret = dma_cookie_status(dchan, cookie, txstate); 900 if (ret == DMA_COMPLETE || !txstate) 901 return ret; 902 903 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 904 spin_lock_irqsave(&chan->lock, flags); 905 906 desc = list_last_entry(&chan->active_list, 907 struct xilinx_dma_tx_descriptor, node); 908 if (chan->has_sg) { 909 list_for_each_entry(segment, &desc->segments, node) { 910 hw = &segment->hw; 911 residue += (hw->control - hw->status) & 912 XILINX_DMA_MAX_TRANS_LEN; 913 } 914 } 915 spin_unlock_irqrestore(&chan->lock, flags); 916 917 chan->residue = residue; 918 dma_set_residue(txstate, chan->residue); 919 } 920 921 return ret; 922 } 923 924 /** 925 * xilinx_dma_is_running - Check if DMA channel is running 926 * @chan: Driver specific DMA channel 927 * 928 * Return: '1' if running, '0' if not. 929 */ 930 static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) 931 { 932 return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 933 XILINX_DMA_DMASR_HALTED) && 934 (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & 935 XILINX_DMA_DMACR_RUNSTOP); 936 } 937 938 /** 939 * xilinx_dma_is_idle - Check if DMA channel is idle 940 * @chan: Driver specific DMA channel 941 * 942 * Return: '1' if idle, '0' if not. 943 */ 944 static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) 945 { 946 return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 947 XILINX_DMA_DMASR_IDLE; 948 } 949 950 /** 951 * xilinx_dma_stop_transfer - Halt DMA channel 952 * @chan: Driver specific DMA channel 953 */ 954 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 955 { 956 u32 val; 957 958 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 959 960 /* Wait for the hardware to halt */ 961 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 962 val & XILINX_DMA_DMASR_HALTED, 0, 963 XILINX_DMA_LOOP_COUNT); 964 } 965 966 /** 967 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 968 * @chan: Driver specific DMA channel 969 */ 970 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 971 { 972 u32 val; 973 974 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 975 val & XILINX_DMA_DMASR_IDLE, 0, 976 XILINX_DMA_LOOP_COUNT); 977 } 978 979 /** 980 * xilinx_dma_start - Start DMA channel 981 * @chan: Driver specific DMA channel 982 */ 983 static void xilinx_dma_start(struct xilinx_dma_chan *chan) 984 { 985 int err; 986 u32 val; 987 988 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 989 990 /* Wait for the hardware to start */ 991 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 992 !(val & XILINX_DMA_DMASR_HALTED), 0, 993 XILINX_DMA_LOOP_COUNT); 994 995 if (err) { 996 dev_err(chan->dev, "Cannot start channel %p: %x\n", 997 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 998 999 chan->err = true; 1000 } 1001 } 1002 1003 /** 1004 * xilinx_vdma_start_transfer - Starts VDMA transfer 1005 * @chan: Driver specific channel struct pointer 1006 */ 1007 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1008 { 1009 struct xilinx_vdma_config *config = &chan->config; 1010 struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1011 u32 reg; 1012 struct xilinx_vdma_tx_segment *tail_segment; 1013 1014 /* This function was invoked with lock held */ 1015 if (chan->err) 1016 return; 1017 1018 if (list_empty(&chan->pending_list)) 1019 return; 1020 1021 desc = list_first_entry(&chan->pending_list, 1022 struct xilinx_dma_tx_descriptor, node); 1023 tail_desc = list_last_entry(&chan->pending_list, 1024 struct xilinx_dma_tx_descriptor, node); 1025 1026 tail_segment = list_last_entry(&tail_desc->segments, 1027 struct xilinx_vdma_tx_segment, node); 1028 1029 /* If it is SG mode and hardware is busy, cannot submit */ 1030 if (chan->has_sg && xilinx_dma_is_running(chan) && 1031 !xilinx_dma_is_idle(chan)) { 1032 dev_dbg(chan->dev, "DMA controller still busy\n"); 1033 return; 1034 } 1035 1036 /* 1037 * If hardware is idle, then all descriptors on the running lists are 1038 * done, start new transfers 1039 */ 1040 if (chan->has_sg) 1041 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1042 desc->async_tx.phys); 1043 1044 /* Configure the hardware using info in the config structure */ 1045 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1046 1047 if (config->frm_cnt_en) 1048 reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 1049 else 1050 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1051 1052 /* Configure channel to allow number frame buffers */ 1053 dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, 1054 chan->desc_pendingcount); 1055 1056 /* 1057 * With SG, start with circular mode, so that BDs can be fetched. 1058 * In direct register mode, if not parking, enable circular mode 1059 */ 1060 if (chan->has_sg || !config->park) 1061 reg |= XILINX_DMA_DMACR_CIRC_EN; 1062 1063 if (config->park) 1064 reg &= ~XILINX_DMA_DMACR_CIRC_EN; 1065 1066 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1067 1068 if (config->park && (config->park_frm >= 0) && 1069 (config->park_frm < chan->num_frms)) { 1070 if (chan->direction == DMA_MEM_TO_DEV) 1071 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1072 config->park_frm << 1073 XILINX_DMA_PARK_PTR_RD_REF_SHIFT); 1074 else 1075 dma_write(chan, XILINX_DMA_REG_PARK_PTR, 1076 config->park_frm << 1077 XILINX_DMA_PARK_PTR_WR_REF_SHIFT); 1078 } 1079 1080 /* Start the hardware */ 1081 xilinx_dma_start(chan); 1082 1083 if (chan->err) 1084 return; 1085 1086 /* Start the transfer */ 1087 if (chan->has_sg) { 1088 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1089 tail_segment->phys); 1090 } else { 1091 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1092 int i = 0; 1093 1094 if (chan->desc_submitcount < chan->num_frms) 1095 i = chan->desc_submitcount; 1096 1097 list_for_each_entry(segment, &desc->segments, node) { 1098 if (chan->ext_addr) 1099 vdma_desc_write_64(chan, 1100 XILINX_VDMA_REG_START_ADDRESS_64(i++), 1101 segment->hw.buf_addr, 1102 segment->hw.buf_addr_msb); 1103 else 1104 vdma_desc_write(chan, 1105 XILINX_VDMA_REG_START_ADDRESS(i++), 1106 segment->hw.buf_addr); 1107 1108 last = segment; 1109 } 1110 1111 if (!last) 1112 return; 1113 1114 /* HW expects these parameters to be same for one transaction */ 1115 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 1116 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1117 last->hw.stride); 1118 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1119 } 1120 1121 if (!chan->has_sg) { 1122 list_del(&desc->node); 1123 list_add_tail(&desc->node, &chan->active_list); 1124 chan->desc_submitcount++; 1125 chan->desc_pendingcount--; 1126 if (chan->desc_submitcount == chan->num_frms) 1127 chan->desc_submitcount = 0; 1128 } else { 1129 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1130 chan->desc_pendingcount = 0; 1131 } 1132 } 1133 1134 /** 1135 * xilinx_cdma_start_transfer - Starts cdma transfer 1136 * @chan: Driver specific channel struct pointer 1137 */ 1138 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 1139 { 1140 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1141 struct xilinx_cdma_tx_segment *tail_segment; 1142 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 1143 1144 if (chan->err) 1145 return; 1146 1147 if (list_empty(&chan->pending_list)) 1148 return; 1149 1150 head_desc = list_first_entry(&chan->pending_list, 1151 struct xilinx_dma_tx_descriptor, node); 1152 tail_desc = list_last_entry(&chan->pending_list, 1153 struct xilinx_dma_tx_descriptor, node); 1154 tail_segment = list_last_entry(&tail_desc->segments, 1155 struct xilinx_cdma_tx_segment, node); 1156 1157 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1158 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1159 ctrl_reg |= chan->desc_pendingcount << 1160 XILINX_DMA_CR_COALESCE_SHIFT; 1161 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 1162 } 1163 1164 if (chan->has_sg) { 1165 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1166 head_desc->async_tx.phys); 1167 1168 /* Update tail ptr register which will start the transfer */ 1169 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1170 tail_segment->phys); 1171 } else { 1172 /* In simple mode */ 1173 struct xilinx_cdma_tx_segment *segment; 1174 struct xilinx_cdma_desc_hw *hw; 1175 1176 segment = list_first_entry(&head_desc->segments, 1177 struct xilinx_cdma_tx_segment, 1178 node); 1179 1180 hw = &segment->hw; 1181 1182 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); 1183 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); 1184 1185 /* Start the transfer */ 1186 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1187 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1188 } 1189 1190 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1191 chan->desc_pendingcount = 0; 1192 } 1193 1194 /** 1195 * xilinx_dma_start_transfer - Starts DMA transfer 1196 * @chan: Driver specific channel struct pointer 1197 */ 1198 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 1199 { 1200 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1201 struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; 1202 u32 reg; 1203 1204 if (chan->err) 1205 return; 1206 1207 if (list_empty(&chan->pending_list)) 1208 return; 1209 1210 /* If it is SG mode and hardware is busy, cannot submit */ 1211 if (chan->has_sg && xilinx_dma_is_running(chan) && 1212 !xilinx_dma_is_idle(chan)) { 1213 dev_dbg(chan->dev, "DMA controller still busy\n"); 1214 return; 1215 } 1216 1217 head_desc = list_first_entry(&chan->pending_list, 1218 struct xilinx_dma_tx_descriptor, node); 1219 tail_desc = list_last_entry(&chan->pending_list, 1220 struct xilinx_dma_tx_descriptor, node); 1221 tail_segment = list_last_entry(&tail_desc->segments, 1222 struct xilinx_axidma_tx_segment, node); 1223 1224 if (chan->has_sg && !chan->xdev->mcdma) { 1225 old_head = list_first_entry(&head_desc->segments, 1226 struct xilinx_axidma_tx_segment, node); 1227 new_head = chan->seg_v; 1228 /* Copy Buffer Descriptor fields. */ 1229 new_head->hw = old_head->hw; 1230 1231 /* Swap and save new reserve */ 1232 list_replace_init(&old_head->node, &new_head->node); 1233 chan->seg_v = old_head; 1234 1235 tail_segment->hw.next_desc = chan->seg_v->phys; 1236 head_desc->async_tx.phys = new_head->phys; 1237 } 1238 1239 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1240 1241 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1242 reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1243 reg |= chan->desc_pendingcount << 1244 XILINX_DMA_CR_COALESCE_SHIFT; 1245 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1246 } 1247 1248 if (chan->has_sg && !chan->xdev->mcdma) 1249 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1250 head_desc->async_tx.phys); 1251 1252 if (chan->has_sg && chan->xdev->mcdma) { 1253 if (chan->direction == DMA_MEM_TO_DEV) { 1254 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1255 head_desc->async_tx.phys); 1256 } else { 1257 if (!chan->tdest) { 1258 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1259 head_desc->async_tx.phys); 1260 } else { 1261 dma_ctrl_write(chan, 1262 XILINX_DMA_MCRX_CDESC(chan->tdest), 1263 head_desc->async_tx.phys); 1264 } 1265 } 1266 } 1267 1268 xilinx_dma_start(chan); 1269 1270 if (chan->err) 1271 return; 1272 1273 /* Start the transfer */ 1274 if (chan->has_sg && !chan->xdev->mcdma) { 1275 if (chan->cyclic) 1276 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1277 chan->cyclic_seg_v->phys); 1278 else 1279 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1280 tail_segment->phys); 1281 } else if (chan->has_sg && chan->xdev->mcdma) { 1282 if (chan->direction == DMA_MEM_TO_DEV) { 1283 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1284 tail_segment->phys); 1285 } else { 1286 if (!chan->tdest) { 1287 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1288 tail_segment->phys); 1289 } else { 1290 dma_ctrl_write(chan, 1291 XILINX_DMA_MCRX_TDESC(chan->tdest), 1292 tail_segment->phys); 1293 } 1294 } 1295 } else { 1296 struct xilinx_axidma_tx_segment *segment; 1297 struct xilinx_axidma_desc_hw *hw; 1298 1299 segment = list_first_entry(&head_desc->segments, 1300 struct xilinx_axidma_tx_segment, 1301 node); 1302 hw = &segment->hw; 1303 1304 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 1305 1306 /* Start the transfer */ 1307 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1308 hw->control & XILINX_DMA_MAX_TRANS_LEN); 1309 } 1310 1311 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1312 chan->desc_pendingcount = 0; 1313 } 1314 1315 /** 1316 * xilinx_dma_issue_pending - Issue pending transactions 1317 * @dchan: DMA channel 1318 */ 1319 static void xilinx_dma_issue_pending(struct dma_chan *dchan) 1320 { 1321 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1322 unsigned long flags; 1323 1324 spin_lock_irqsave(&chan->lock, flags); 1325 chan->start_transfer(chan); 1326 spin_unlock_irqrestore(&chan->lock, flags); 1327 } 1328 1329 /** 1330 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 1331 * @chan : xilinx DMA channel 1332 * 1333 * CONTEXT: hardirq 1334 */ 1335 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 1336 { 1337 struct xilinx_dma_tx_descriptor *desc, *next; 1338 1339 /* This function was invoked with lock held */ 1340 if (list_empty(&chan->active_list)) 1341 return; 1342 1343 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1344 list_del(&desc->node); 1345 if (!desc->cyclic) 1346 dma_cookie_complete(&desc->async_tx); 1347 list_add_tail(&desc->node, &chan->done_list); 1348 } 1349 } 1350 1351 /** 1352 * xilinx_dma_reset - Reset DMA channel 1353 * @chan: Driver specific DMA channel 1354 * 1355 * Return: '0' on success and failure value on error 1356 */ 1357 static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 1358 { 1359 int err; 1360 u32 tmp; 1361 1362 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 1363 1364 /* Wait for the hardware to finish reset */ 1365 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 1366 !(tmp & XILINX_DMA_DMACR_RESET), 0, 1367 XILINX_DMA_LOOP_COUNT); 1368 1369 if (err) { 1370 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1371 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 1372 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1373 return -ETIMEDOUT; 1374 } 1375 1376 chan->err = false; 1377 1378 return err; 1379 } 1380 1381 /** 1382 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 1383 * @chan: Driver specific DMA channel 1384 * 1385 * Return: '0' on success and failure value on error 1386 */ 1387 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 1388 { 1389 int err; 1390 1391 /* Reset VDMA */ 1392 err = xilinx_dma_reset(chan); 1393 if (err) 1394 return err; 1395 1396 /* Enable interrupts */ 1397 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1398 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1399 1400 return 0; 1401 } 1402 1403 /** 1404 * xilinx_dma_irq_handler - DMA Interrupt handler 1405 * @irq: IRQ number 1406 * @data: Pointer to the Xilinx DMA channel structure 1407 * 1408 * Return: IRQ_HANDLED/IRQ_NONE 1409 */ 1410 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 1411 { 1412 struct xilinx_dma_chan *chan = data; 1413 u32 status; 1414 1415 /* Read the status and ack the interrupts. */ 1416 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 1417 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 1418 return IRQ_NONE; 1419 1420 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1421 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1422 1423 if (status & XILINX_DMA_DMASR_ERR_IRQ) { 1424 /* 1425 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1426 * error is recoverable, ignore it. Otherwise flag the error. 1427 * 1428 * Only recoverable errors can be cleared in the DMASR register, 1429 * make sure not to write to other error bits to 1. 1430 */ 1431 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 1432 1433 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1434 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 1435 1436 if (!chan->flush_on_fsync || 1437 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 1438 dev_err(chan->dev, 1439 "Channel %p has errors %x, cdr %x tdr %x\n", 1440 chan, errors, 1441 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 1442 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 1443 chan->err = true; 1444 } 1445 } 1446 1447 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { 1448 /* 1449 * Device takes too long to do the transfer when user requires 1450 * responsiveness. 1451 */ 1452 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1453 } 1454 1455 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 1456 spin_lock(&chan->lock); 1457 xilinx_dma_complete_descriptor(chan); 1458 chan->start_transfer(chan); 1459 spin_unlock(&chan->lock); 1460 } 1461 1462 tasklet_schedule(&chan->tasklet); 1463 return IRQ_HANDLED; 1464 } 1465 1466 /** 1467 * append_desc_queue - Queuing descriptor 1468 * @chan: Driver specific dma channel 1469 * @desc: dma transaction descriptor 1470 */ 1471 static void append_desc_queue(struct xilinx_dma_chan *chan, 1472 struct xilinx_dma_tx_descriptor *desc) 1473 { 1474 struct xilinx_vdma_tx_segment *tail_segment; 1475 struct xilinx_dma_tx_descriptor *tail_desc; 1476 struct xilinx_axidma_tx_segment *axidma_tail_segment; 1477 struct xilinx_cdma_tx_segment *cdma_tail_segment; 1478 1479 if (list_empty(&chan->pending_list)) 1480 goto append; 1481 1482 /* 1483 * Add the hardware descriptor to the chain of hardware descriptors 1484 * that already exists in memory. 1485 */ 1486 tail_desc = list_last_entry(&chan->pending_list, 1487 struct xilinx_dma_tx_descriptor, node); 1488 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 1489 tail_segment = list_last_entry(&tail_desc->segments, 1490 struct xilinx_vdma_tx_segment, 1491 node); 1492 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1493 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1494 cdma_tail_segment = list_last_entry(&tail_desc->segments, 1495 struct xilinx_cdma_tx_segment, 1496 node); 1497 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1498 } else { 1499 axidma_tail_segment = list_last_entry(&tail_desc->segments, 1500 struct xilinx_axidma_tx_segment, 1501 node); 1502 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1503 } 1504 1505 /* 1506 * Add the software descriptor and all children to the list 1507 * of pending transactions 1508 */ 1509 append: 1510 list_add_tail(&desc->node, &chan->pending_list); 1511 chan->desc_pendingcount++; 1512 1513 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 1514 && unlikely(chan->desc_pendingcount > chan->num_frms)) { 1515 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 1516 chan->desc_pendingcount = chan->num_frms; 1517 } 1518 } 1519 1520 /** 1521 * xilinx_dma_tx_submit - Submit DMA transaction 1522 * @tx: Async transaction descriptor 1523 * 1524 * Return: cookie value on success and failure value on error 1525 */ 1526 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 1527 { 1528 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 1529 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 1530 dma_cookie_t cookie; 1531 unsigned long flags; 1532 int err; 1533 1534 if (chan->cyclic) { 1535 xilinx_dma_free_tx_descriptor(chan, desc); 1536 return -EBUSY; 1537 } 1538 1539 if (chan->err) { 1540 /* 1541 * If reset fails, need to hard reset the system. 1542 * Channel is no longer functional 1543 */ 1544 err = xilinx_dma_chan_reset(chan); 1545 if (err < 0) 1546 return err; 1547 } 1548 1549 spin_lock_irqsave(&chan->lock, flags); 1550 1551 cookie = dma_cookie_assign(tx); 1552 1553 /* Put this transaction onto the tail of the pending queue */ 1554 append_desc_queue(chan, desc); 1555 1556 if (desc->cyclic) 1557 chan->cyclic = true; 1558 1559 spin_unlock_irqrestore(&chan->lock, flags); 1560 1561 return cookie; 1562 } 1563 1564 /** 1565 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 1566 * DMA_SLAVE transaction 1567 * @dchan: DMA channel 1568 * @xt: Interleaved template pointer 1569 * @flags: transfer ack flags 1570 * 1571 * Return: Async transaction descriptor on success and NULL on failure 1572 */ 1573 static struct dma_async_tx_descriptor * 1574 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 1575 struct dma_interleaved_template *xt, 1576 unsigned long flags) 1577 { 1578 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1579 struct xilinx_dma_tx_descriptor *desc; 1580 struct xilinx_vdma_tx_segment *segment, *prev = NULL; 1581 struct xilinx_vdma_desc_hw *hw; 1582 1583 if (!is_slave_direction(xt->dir)) 1584 return NULL; 1585 1586 if (!xt->numf || !xt->sgl[0].size) 1587 return NULL; 1588 1589 if (xt->frame_size != 1) 1590 return NULL; 1591 1592 /* Allocate a transaction descriptor. */ 1593 desc = xilinx_dma_alloc_tx_descriptor(chan); 1594 if (!desc) 1595 return NULL; 1596 1597 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1598 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1599 async_tx_ack(&desc->async_tx); 1600 1601 /* Allocate the link descriptor from DMA pool */ 1602 segment = xilinx_vdma_alloc_tx_segment(chan); 1603 if (!segment) 1604 goto error; 1605 1606 /* Fill in the hardware descriptor */ 1607 hw = &segment->hw; 1608 hw->vsize = xt->numf; 1609 hw->hsize = xt->sgl[0].size; 1610 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 1611 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 1612 hw->stride |= chan->config.frm_dly << 1613 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 1614 1615 if (xt->dir != DMA_MEM_TO_DEV) { 1616 if (chan->ext_addr) { 1617 hw->buf_addr = lower_32_bits(xt->dst_start); 1618 hw->buf_addr_msb = upper_32_bits(xt->dst_start); 1619 } else { 1620 hw->buf_addr = xt->dst_start; 1621 } 1622 } else { 1623 if (chan->ext_addr) { 1624 hw->buf_addr = lower_32_bits(xt->src_start); 1625 hw->buf_addr_msb = upper_32_bits(xt->src_start); 1626 } else { 1627 hw->buf_addr = xt->src_start; 1628 } 1629 } 1630 1631 /* Insert the segment into the descriptor segments list. */ 1632 list_add_tail(&segment->node, &desc->segments); 1633 1634 prev = segment; 1635 1636 /* Link the last hardware descriptor with the first. */ 1637 segment = list_first_entry(&desc->segments, 1638 struct xilinx_vdma_tx_segment, node); 1639 desc->async_tx.phys = segment->phys; 1640 1641 return &desc->async_tx; 1642 1643 error: 1644 xilinx_dma_free_tx_descriptor(chan, desc); 1645 return NULL; 1646 } 1647 1648 /** 1649 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 1650 * @dchan: DMA channel 1651 * @dma_dst: destination address 1652 * @dma_src: source address 1653 * @len: transfer length 1654 * @flags: transfer ack flags 1655 * 1656 * Return: Async transaction descriptor on success and NULL on failure 1657 */ 1658 static struct dma_async_tx_descriptor * 1659 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 1660 dma_addr_t dma_src, size_t len, unsigned long flags) 1661 { 1662 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1663 struct xilinx_dma_tx_descriptor *desc; 1664 struct xilinx_cdma_tx_segment *segment; 1665 struct xilinx_cdma_desc_hw *hw; 1666 1667 if (!len || len > XILINX_DMA_MAX_TRANS_LEN) 1668 return NULL; 1669 1670 desc = xilinx_dma_alloc_tx_descriptor(chan); 1671 if (!desc) 1672 return NULL; 1673 1674 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1675 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1676 1677 /* Allocate the link descriptor from DMA pool */ 1678 segment = xilinx_cdma_alloc_tx_segment(chan); 1679 if (!segment) 1680 goto error; 1681 1682 hw = &segment->hw; 1683 hw->control = len; 1684 hw->src_addr = dma_src; 1685 hw->dest_addr = dma_dst; 1686 if (chan->ext_addr) { 1687 hw->src_addr_msb = upper_32_bits(dma_src); 1688 hw->dest_addr_msb = upper_32_bits(dma_dst); 1689 } 1690 1691 /* Insert the segment into the descriptor segments list. */ 1692 list_add_tail(&segment->node, &desc->segments); 1693 1694 desc->async_tx.phys = segment->phys; 1695 hw->next_desc = segment->phys; 1696 1697 return &desc->async_tx; 1698 1699 error: 1700 xilinx_dma_free_tx_descriptor(chan, desc); 1701 return NULL; 1702 } 1703 1704 /** 1705 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 1706 * @dchan: DMA channel 1707 * @sgl: scatterlist to transfer to/from 1708 * @sg_len: number of entries in @scatterlist 1709 * @direction: DMA direction 1710 * @flags: transfer ack flags 1711 * @context: APP words of the descriptor 1712 * 1713 * Return: Async transaction descriptor on success and NULL on failure 1714 */ 1715 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 1716 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 1717 enum dma_transfer_direction direction, unsigned long flags, 1718 void *context) 1719 { 1720 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1721 struct xilinx_dma_tx_descriptor *desc; 1722 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; 1723 u32 *app_w = (u32 *)context; 1724 struct scatterlist *sg; 1725 size_t copy; 1726 size_t sg_used; 1727 unsigned int i; 1728 1729 if (!is_slave_direction(direction)) 1730 return NULL; 1731 1732 /* Allocate a transaction descriptor. */ 1733 desc = xilinx_dma_alloc_tx_descriptor(chan); 1734 if (!desc) 1735 return NULL; 1736 1737 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1738 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1739 1740 /* Build transactions using information in the scatter gather list */ 1741 for_each_sg(sgl, sg, sg_len, i) { 1742 sg_used = 0; 1743 1744 /* Loop until the entire scatterlist entry is used */ 1745 while (sg_used < sg_dma_len(sg)) { 1746 struct xilinx_axidma_desc_hw *hw; 1747 1748 /* Get a free segment */ 1749 segment = xilinx_axidma_alloc_tx_segment(chan); 1750 if (!segment) 1751 goto error; 1752 1753 /* 1754 * Calculate the maximum number of bytes to transfer, 1755 * making sure it is less than the hw limit 1756 */ 1757 copy = min_t(size_t, sg_dma_len(sg) - sg_used, 1758 XILINX_DMA_MAX_TRANS_LEN); 1759 hw = &segment->hw; 1760 1761 /* Fill in the descriptor */ 1762 xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 1763 sg_used, 0); 1764 1765 hw->control = copy; 1766 1767 if (chan->direction == DMA_MEM_TO_DEV) { 1768 if (app_w) 1769 memcpy(hw->app, app_w, sizeof(u32) * 1770 XILINX_DMA_NUM_APP_WORDS); 1771 } 1772 1773 if (prev) 1774 prev->hw.next_desc = segment->phys; 1775 1776 prev = segment; 1777 sg_used += copy; 1778 1779 /* 1780 * Insert the segment into the descriptor segments 1781 * list. 1782 */ 1783 list_add_tail(&segment->node, &desc->segments); 1784 } 1785 } 1786 1787 segment = list_first_entry(&desc->segments, 1788 struct xilinx_axidma_tx_segment, node); 1789 desc->async_tx.phys = segment->phys; 1790 prev->hw.next_desc = segment->phys; 1791 1792 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1793 if (chan->direction == DMA_MEM_TO_DEV) { 1794 segment->hw.control |= XILINX_DMA_BD_SOP; 1795 segment = list_last_entry(&desc->segments, 1796 struct xilinx_axidma_tx_segment, 1797 node); 1798 segment->hw.control |= XILINX_DMA_BD_EOP; 1799 } 1800 1801 return &desc->async_tx; 1802 1803 error: 1804 xilinx_dma_free_tx_descriptor(chan, desc); 1805 return NULL; 1806 } 1807 1808 /** 1809 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 1810 * @chan: DMA channel 1811 * @sgl: scatterlist to transfer to/from 1812 * @sg_len: number of entries in @scatterlist 1813 * @direction: DMA direction 1814 * @flags: transfer ack flags 1815 */ 1816 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 1817 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 1818 size_t period_len, enum dma_transfer_direction direction, 1819 unsigned long flags) 1820 { 1821 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1822 struct xilinx_dma_tx_descriptor *desc; 1823 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 1824 size_t copy, sg_used; 1825 unsigned int num_periods; 1826 int i; 1827 u32 reg; 1828 1829 if (!period_len) 1830 return NULL; 1831 1832 num_periods = buf_len / period_len; 1833 1834 if (!num_periods) 1835 return NULL; 1836 1837 if (!is_slave_direction(direction)) 1838 return NULL; 1839 1840 /* Allocate a transaction descriptor. */ 1841 desc = xilinx_dma_alloc_tx_descriptor(chan); 1842 if (!desc) 1843 return NULL; 1844 1845 chan->direction = direction; 1846 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1847 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1848 1849 for (i = 0; i < num_periods; ++i) { 1850 sg_used = 0; 1851 1852 while (sg_used < period_len) { 1853 struct xilinx_axidma_desc_hw *hw; 1854 1855 /* Get a free segment */ 1856 segment = xilinx_axidma_alloc_tx_segment(chan); 1857 if (!segment) 1858 goto error; 1859 1860 /* 1861 * Calculate the maximum number of bytes to transfer, 1862 * making sure it is less than the hw limit 1863 */ 1864 copy = min_t(size_t, period_len - sg_used, 1865 XILINX_DMA_MAX_TRANS_LEN); 1866 hw = &segment->hw; 1867 xilinx_axidma_buf(chan, hw, buf_addr, sg_used, 1868 period_len * i); 1869 hw->control = copy; 1870 1871 if (prev) 1872 prev->hw.next_desc = segment->phys; 1873 1874 prev = segment; 1875 sg_used += copy; 1876 1877 /* 1878 * Insert the segment into the descriptor segments 1879 * list. 1880 */ 1881 list_add_tail(&segment->node, &desc->segments); 1882 } 1883 } 1884 1885 head_segment = list_first_entry(&desc->segments, 1886 struct xilinx_axidma_tx_segment, node); 1887 desc->async_tx.phys = head_segment->phys; 1888 1889 desc->cyclic = true; 1890 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1891 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 1892 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1893 1894 segment = list_last_entry(&desc->segments, 1895 struct xilinx_axidma_tx_segment, 1896 node); 1897 segment->hw.next_desc = (u32) head_segment->phys; 1898 1899 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1900 if (direction == DMA_MEM_TO_DEV) { 1901 head_segment->hw.control |= XILINX_DMA_BD_SOP; 1902 segment->hw.control |= XILINX_DMA_BD_EOP; 1903 } 1904 1905 return &desc->async_tx; 1906 1907 error: 1908 xilinx_dma_free_tx_descriptor(chan, desc); 1909 return NULL; 1910 } 1911 1912 /** 1913 * xilinx_dma_prep_interleaved - prepare a descriptor for a 1914 * DMA_SLAVE transaction 1915 * @dchan: DMA channel 1916 * @xt: Interleaved template pointer 1917 * @flags: transfer ack flags 1918 * 1919 * Return: Async transaction descriptor on success and NULL on failure 1920 */ 1921 static struct dma_async_tx_descriptor * 1922 xilinx_dma_prep_interleaved(struct dma_chan *dchan, 1923 struct dma_interleaved_template *xt, 1924 unsigned long flags) 1925 { 1926 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1927 struct xilinx_dma_tx_descriptor *desc; 1928 struct xilinx_axidma_tx_segment *segment; 1929 struct xilinx_axidma_desc_hw *hw; 1930 1931 if (!is_slave_direction(xt->dir)) 1932 return NULL; 1933 1934 if (!xt->numf || !xt->sgl[0].size) 1935 return NULL; 1936 1937 if (xt->frame_size != 1) 1938 return NULL; 1939 1940 /* Allocate a transaction descriptor. */ 1941 desc = xilinx_dma_alloc_tx_descriptor(chan); 1942 if (!desc) 1943 return NULL; 1944 1945 chan->direction = xt->dir; 1946 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1947 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 1948 1949 /* Get a free segment */ 1950 segment = xilinx_axidma_alloc_tx_segment(chan); 1951 if (!segment) 1952 goto error; 1953 1954 hw = &segment->hw; 1955 1956 /* Fill in the descriptor */ 1957 if (xt->dir != DMA_MEM_TO_DEV) 1958 hw->buf_addr = xt->dst_start; 1959 else 1960 hw->buf_addr = xt->src_start; 1961 1962 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; 1963 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & 1964 XILINX_DMA_BD_VSIZE_MASK; 1965 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & 1966 XILINX_DMA_BD_STRIDE_MASK; 1967 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; 1968 1969 /* 1970 * Insert the segment into the descriptor segments 1971 * list. 1972 */ 1973 list_add_tail(&segment->node, &desc->segments); 1974 1975 1976 segment = list_first_entry(&desc->segments, 1977 struct xilinx_axidma_tx_segment, node); 1978 desc->async_tx.phys = segment->phys; 1979 1980 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 1981 if (xt->dir == DMA_MEM_TO_DEV) { 1982 segment->hw.control |= XILINX_DMA_BD_SOP; 1983 segment = list_last_entry(&desc->segments, 1984 struct xilinx_axidma_tx_segment, 1985 node); 1986 segment->hw.control |= XILINX_DMA_BD_EOP; 1987 } 1988 1989 return &desc->async_tx; 1990 1991 error: 1992 xilinx_dma_free_tx_descriptor(chan, desc); 1993 return NULL; 1994 } 1995 1996 /** 1997 * xilinx_dma_terminate_all - Halt the channel and free descriptors 1998 * @chan: Driver specific DMA Channel pointer 1999 */ 2000 static int xilinx_dma_terminate_all(struct dma_chan *dchan) 2001 { 2002 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2003 u32 reg; 2004 int err; 2005 2006 if (chan->cyclic) 2007 xilinx_dma_chan_reset(chan); 2008 2009 err = chan->stop_transfer(chan); 2010 if (err) { 2011 dev_err(chan->dev, "Cannot stop channel %p: %x\n", 2012 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 2013 chan->err = true; 2014 } 2015 2016 /* Remove and free all of the descriptors in the lists */ 2017 xilinx_dma_free_descriptors(chan); 2018 2019 if (chan->cyclic) { 2020 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2021 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 2022 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 2023 chan->cyclic = false; 2024 } 2025 2026 return 0; 2027 } 2028 2029 /** 2030 * xilinx_dma_channel_set_config - Configure VDMA channel 2031 * Run-time configuration for Axi VDMA, supports: 2032 * . halt the channel 2033 * . configure interrupt coalescing and inter-packet delay threshold 2034 * . start/stop parking 2035 * . enable genlock 2036 * 2037 * @dchan: DMA channel 2038 * @cfg: VDMA device configuration pointer 2039 * 2040 * Return: '0' on success and failure value on error 2041 */ 2042 int xilinx_vdma_channel_set_config(struct dma_chan *dchan, 2043 struct xilinx_vdma_config *cfg) 2044 { 2045 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2046 u32 dmacr; 2047 2048 if (cfg->reset) 2049 return xilinx_dma_chan_reset(chan); 2050 2051 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2052 2053 chan->config.frm_dly = cfg->frm_dly; 2054 chan->config.park = cfg->park; 2055 2056 /* genlock settings */ 2057 chan->config.gen_lock = cfg->gen_lock; 2058 chan->config.master = cfg->master; 2059 2060 if (cfg->gen_lock && chan->genlock) { 2061 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; 2062 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; 2063 } 2064 2065 chan->config.frm_cnt_en = cfg->frm_cnt_en; 2066 if (cfg->park) 2067 chan->config.park_frm = cfg->park_frm; 2068 else 2069 chan->config.park_frm = -1; 2070 2071 chan->config.coalesc = cfg->coalesc; 2072 chan->config.delay = cfg->delay; 2073 2074 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { 2075 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; 2076 chan->config.coalesc = cfg->coalesc; 2077 } 2078 2079 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { 2080 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; 2081 chan->config.delay = cfg->delay; 2082 } 2083 2084 /* FSync Source selection */ 2085 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; 2086 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; 2087 2088 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); 2089 2090 return 0; 2091 } 2092 EXPORT_SYMBOL(xilinx_vdma_channel_set_config); 2093 2094 /* ----------------------------------------------------------------------------- 2095 * Probe and remove 2096 */ 2097 2098 /** 2099 * xilinx_dma_chan_remove - Per Channel remove function 2100 * @chan: Driver specific DMA channel 2101 */ 2102 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) 2103 { 2104 /* Disable all interrupts */ 2105 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 2106 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 2107 2108 if (chan->irq > 0) 2109 free_irq(chan->irq, chan); 2110 2111 tasklet_kill(&chan->tasklet); 2112 2113 list_del(&chan->common.device_node); 2114 } 2115 2116 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2117 struct clk **tx_clk, struct clk **rx_clk, 2118 struct clk **sg_clk, struct clk **tmp_clk) 2119 { 2120 int err; 2121 2122 *tmp_clk = NULL; 2123 2124 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2125 if (IS_ERR(*axi_clk)) { 2126 err = PTR_ERR(*axi_clk); 2127 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); 2128 return err; 2129 } 2130 2131 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2132 if (IS_ERR(*tx_clk)) 2133 *tx_clk = NULL; 2134 2135 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2136 if (IS_ERR(*rx_clk)) 2137 *rx_clk = NULL; 2138 2139 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); 2140 if (IS_ERR(*sg_clk)) 2141 *sg_clk = NULL; 2142 2143 err = clk_prepare_enable(*axi_clk); 2144 if (err) { 2145 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); 2146 return err; 2147 } 2148 2149 err = clk_prepare_enable(*tx_clk); 2150 if (err) { 2151 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 2152 goto err_disable_axiclk; 2153 } 2154 2155 err = clk_prepare_enable(*rx_clk); 2156 if (err) { 2157 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 2158 goto err_disable_txclk; 2159 } 2160 2161 err = clk_prepare_enable(*sg_clk); 2162 if (err) { 2163 dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err); 2164 goto err_disable_rxclk; 2165 } 2166 2167 return 0; 2168 2169 err_disable_rxclk: 2170 clk_disable_unprepare(*rx_clk); 2171 err_disable_txclk: 2172 clk_disable_unprepare(*tx_clk); 2173 err_disable_axiclk: 2174 clk_disable_unprepare(*axi_clk); 2175 2176 return err; 2177 } 2178 2179 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2180 struct clk **dev_clk, struct clk **tmp_clk, 2181 struct clk **tmp1_clk, struct clk **tmp2_clk) 2182 { 2183 int err; 2184 2185 *tmp_clk = NULL; 2186 *tmp1_clk = NULL; 2187 *tmp2_clk = NULL; 2188 2189 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2190 if (IS_ERR(*axi_clk)) { 2191 err = PTR_ERR(*axi_clk); 2192 dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err); 2193 return err; 2194 } 2195 2196 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); 2197 if (IS_ERR(*dev_clk)) { 2198 err = PTR_ERR(*dev_clk); 2199 dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err); 2200 return err; 2201 } 2202 2203 err = clk_prepare_enable(*axi_clk); 2204 if (err) { 2205 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); 2206 return err; 2207 } 2208 2209 err = clk_prepare_enable(*dev_clk); 2210 if (err) { 2211 dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err); 2212 goto err_disable_axiclk; 2213 } 2214 2215 return 0; 2216 2217 err_disable_axiclk: 2218 clk_disable_unprepare(*axi_clk); 2219 2220 return err; 2221 } 2222 2223 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2224 struct clk **tx_clk, struct clk **txs_clk, 2225 struct clk **rx_clk, struct clk **rxs_clk) 2226 { 2227 int err; 2228 2229 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2230 if (IS_ERR(*axi_clk)) { 2231 err = PTR_ERR(*axi_clk); 2232 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err); 2233 return err; 2234 } 2235 2236 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2237 if (IS_ERR(*tx_clk)) 2238 *tx_clk = NULL; 2239 2240 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); 2241 if (IS_ERR(*txs_clk)) 2242 *txs_clk = NULL; 2243 2244 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2245 if (IS_ERR(*rx_clk)) 2246 *rx_clk = NULL; 2247 2248 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); 2249 if (IS_ERR(*rxs_clk)) 2250 *rxs_clk = NULL; 2251 2252 err = clk_prepare_enable(*axi_clk); 2253 if (err) { 2254 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err); 2255 return err; 2256 } 2257 2258 err = clk_prepare_enable(*tx_clk); 2259 if (err) { 2260 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 2261 goto err_disable_axiclk; 2262 } 2263 2264 err = clk_prepare_enable(*txs_clk); 2265 if (err) { 2266 dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err); 2267 goto err_disable_txclk; 2268 } 2269 2270 err = clk_prepare_enable(*rx_clk); 2271 if (err) { 2272 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 2273 goto err_disable_txsclk; 2274 } 2275 2276 err = clk_prepare_enable(*rxs_clk); 2277 if (err) { 2278 dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err); 2279 goto err_disable_rxclk; 2280 } 2281 2282 return 0; 2283 2284 err_disable_rxclk: 2285 clk_disable_unprepare(*rx_clk); 2286 err_disable_txsclk: 2287 clk_disable_unprepare(*txs_clk); 2288 err_disable_txclk: 2289 clk_disable_unprepare(*tx_clk); 2290 err_disable_axiclk: 2291 clk_disable_unprepare(*axi_clk); 2292 2293 return err; 2294 } 2295 2296 static void xdma_disable_allclks(struct xilinx_dma_device *xdev) 2297 { 2298 clk_disable_unprepare(xdev->rxs_clk); 2299 clk_disable_unprepare(xdev->rx_clk); 2300 clk_disable_unprepare(xdev->txs_clk); 2301 clk_disable_unprepare(xdev->tx_clk); 2302 clk_disable_unprepare(xdev->axi_clk); 2303 } 2304 2305 /** 2306 * xilinx_dma_chan_probe - Per Channel Probing 2307 * It get channel features from the device tree entry and 2308 * initialize special channel handling routines 2309 * 2310 * @xdev: Driver specific device structure 2311 * @node: Device node 2312 * 2313 * Return: '0' on success and failure value on error 2314 */ 2315 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 2316 struct device_node *node, int chan_id) 2317 { 2318 struct xilinx_dma_chan *chan; 2319 bool has_dre = false; 2320 u32 value, width; 2321 int err; 2322 2323 /* Allocate and initialize the channel structure */ 2324 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); 2325 if (!chan) 2326 return -ENOMEM; 2327 2328 chan->dev = xdev->dev; 2329 chan->xdev = xdev; 2330 chan->has_sg = xdev->has_sg; 2331 chan->desc_pendingcount = 0x0; 2332 chan->ext_addr = xdev->ext_addr; 2333 2334 spin_lock_init(&chan->lock); 2335 INIT_LIST_HEAD(&chan->pending_list); 2336 INIT_LIST_HEAD(&chan->done_list); 2337 INIT_LIST_HEAD(&chan->active_list); 2338 2339 /* Retrieve the channel properties from the device tree */ 2340 has_dre = of_property_read_bool(node, "xlnx,include-dre"); 2341 2342 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); 2343 2344 err = of_property_read_u32(node, "xlnx,datawidth", &value); 2345 if (err) { 2346 dev_err(xdev->dev, "missing xlnx,datawidth property\n"); 2347 return err; 2348 } 2349 width = value >> 3; /* Convert bits to bytes */ 2350 2351 /* If data width is greater than 8 bytes, DRE is not in hw */ 2352 if (width > 8) 2353 has_dre = false; 2354 2355 if (!has_dre) 2356 xdev->common.copy_align = fls(width - 1); 2357 2358 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || 2359 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || 2360 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 2361 chan->direction = DMA_MEM_TO_DEV; 2362 chan->id = chan_id; 2363 chan->tdest = chan_id; 2364 2365 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2366 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2367 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2368 2369 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2370 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 2371 chan->flush_on_fsync = true; 2372 } 2373 } else if (of_device_is_compatible(node, 2374 "xlnx,axi-vdma-s2mm-channel") || 2375 of_device_is_compatible(node, 2376 "xlnx,axi-dma-s2mm-channel")) { 2377 chan->direction = DMA_DEV_TO_MEM; 2378 chan->id = chan_id; 2379 chan->tdest = chan_id - xdev->nr_channels; 2380 2381 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2382 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2383 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2384 2385 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2386 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 2387 chan->flush_on_fsync = true; 2388 } 2389 } else { 2390 dev_err(xdev->dev, "Invalid channel compatible node\n"); 2391 return -EINVAL; 2392 } 2393 2394 /* Request the interrupt */ 2395 chan->irq = irq_of_parse_and_map(node, 0); 2396 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, 2397 "xilinx-dma-controller", chan); 2398 if (err) { 2399 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 2400 return err; 2401 } 2402 2403 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2404 chan->start_transfer = xilinx_dma_start_transfer; 2405 chan->stop_transfer = xilinx_dma_stop_transfer; 2406 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2407 chan->start_transfer = xilinx_cdma_start_transfer; 2408 chan->stop_transfer = xilinx_cdma_stop_transfer; 2409 } else { 2410 chan->start_transfer = xilinx_vdma_start_transfer; 2411 chan->stop_transfer = xilinx_dma_stop_transfer; 2412 } 2413 2414 /* Initialize the tasklet */ 2415 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, 2416 (unsigned long)chan); 2417 2418 /* 2419 * Initialize the DMA channel and add it to the DMA engine channels 2420 * list. 2421 */ 2422 chan->common.device = &xdev->common; 2423 2424 list_add_tail(&chan->common.device_node, &xdev->common.channels); 2425 xdev->chan[chan->id] = chan; 2426 2427 /* Reset the channel */ 2428 err = xilinx_dma_chan_reset(chan); 2429 if (err < 0) { 2430 dev_err(xdev->dev, "Reset channel failed\n"); 2431 return err; 2432 } 2433 2434 return 0; 2435 } 2436 2437 /** 2438 * xilinx_dma_child_probe - Per child node probe 2439 * It get number of dma-channels per child node from 2440 * device-tree and initializes all the channels. 2441 * 2442 * @xdev: Driver specific device structure 2443 * @node: Device node 2444 * 2445 * Return: 0 always. 2446 */ 2447 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2448 struct device_node *node) { 2449 int ret, i, nr_channels = 1; 2450 2451 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2452 if ((ret < 0) && xdev->mcdma) 2453 dev_warn(xdev->dev, "missing dma-channels property\n"); 2454 2455 for (i = 0; i < nr_channels; i++) 2456 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); 2457 2458 xdev->nr_channels += nr_channels; 2459 2460 return 0; 2461 } 2462 2463 /** 2464 * of_dma_xilinx_xlate - Translation function 2465 * @dma_spec: Pointer to DMA specifier as found in the device tree 2466 * @ofdma: Pointer to DMA controller data 2467 * 2468 * Return: DMA channel pointer on success and NULL on error 2469 */ 2470 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 2471 struct of_dma *ofdma) 2472 { 2473 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 2474 int chan_id = dma_spec->args[0]; 2475 2476 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) 2477 return NULL; 2478 2479 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2480 } 2481 2482 static const struct xilinx_dma_config axidma_config = { 2483 .dmatype = XDMA_TYPE_AXIDMA, 2484 .clk_init = axidma_clk_init, 2485 }; 2486 2487 static const struct xilinx_dma_config axicdma_config = { 2488 .dmatype = XDMA_TYPE_CDMA, 2489 .clk_init = axicdma_clk_init, 2490 }; 2491 2492 static const struct xilinx_dma_config axivdma_config = { 2493 .dmatype = XDMA_TYPE_VDMA, 2494 .clk_init = axivdma_clk_init, 2495 }; 2496 2497 static const struct of_device_id xilinx_dma_of_ids[] = { 2498 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 2499 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 2500 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 2501 {} 2502 }; 2503 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 2504 2505 /** 2506 * xilinx_dma_probe - Driver probe function 2507 * @pdev: Pointer to the platform_device structure 2508 * 2509 * Return: '0' on success and failure value on error 2510 */ 2511 static int xilinx_dma_probe(struct platform_device *pdev) 2512 { 2513 int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 2514 struct clk **, struct clk **, struct clk **) 2515 = axivdma_clk_init; 2516 struct device_node *node = pdev->dev.of_node; 2517 struct xilinx_dma_device *xdev; 2518 struct device_node *child, *np = pdev->dev.of_node; 2519 struct resource *io; 2520 u32 num_frames, addr_width; 2521 int i, err; 2522 2523 /* Allocate and initialize the DMA engine structure */ 2524 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 2525 if (!xdev) 2526 return -ENOMEM; 2527 2528 xdev->dev = &pdev->dev; 2529 if (np) { 2530 const struct of_device_id *match; 2531 2532 match = of_match_node(xilinx_dma_of_ids, np); 2533 if (match && match->data) { 2534 xdev->dma_config = match->data; 2535 clk_init = xdev->dma_config->clk_init; 2536 } 2537 } 2538 2539 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 2540 &xdev->rx_clk, &xdev->rxs_clk); 2541 if (err) 2542 return err; 2543 2544 /* Request and map I/O memory */ 2545 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2546 xdev->regs = devm_ioremap_resource(&pdev->dev, io); 2547 if (IS_ERR(xdev->regs)) 2548 return PTR_ERR(xdev->regs); 2549 2550 /* Retrieve the DMA engine properties from the device tree */ 2551 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 2552 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 2553 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); 2554 2555 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2556 err = of_property_read_u32(node, "xlnx,num-fstores", 2557 &num_frames); 2558 if (err < 0) { 2559 dev_err(xdev->dev, 2560 "missing xlnx,num-fstores property\n"); 2561 return err; 2562 } 2563 2564 err = of_property_read_u32(node, "xlnx,flush-fsync", 2565 &xdev->flush_on_fsync); 2566 if (err < 0) 2567 dev_warn(xdev->dev, 2568 "missing xlnx,flush-fsync property\n"); 2569 } 2570 2571 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); 2572 if (err < 0) 2573 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); 2574 2575 if (addr_width > 32) 2576 xdev->ext_addr = true; 2577 else 2578 xdev->ext_addr = false; 2579 2580 /* Set the dma mask bits */ 2581 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); 2582 2583 /* Initialize the DMA engine */ 2584 xdev->common.dev = &pdev->dev; 2585 2586 INIT_LIST_HEAD(&xdev->common.channels); 2587 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { 2588 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 2589 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 2590 } 2591 2592 xdev->common.device_alloc_chan_resources = 2593 xilinx_dma_alloc_chan_resources; 2594 xdev->common.device_free_chan_resources = 2595 xilinx_dma_free_chan_resources; 2596 xdev->common.device_terminate_all = xilinx_dma_terminate_all; 2597 xdev->common.device_tx_status = xilinx_dma_tx_status; 2598 xdev->common.device_issue_pending = xilinx_dma_issue_pending; 2599 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2600 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 2601 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 2602 xdev->common.device_prep_dma_cyclic = 2603 xilinx_dma_prep_dma_cyclic; 2604 xdev->common.device_prep_interleaved_dma = 2605 xilinx_dma_prep_interleaved; 2606 /* Residue calculation is supported by only AXI DMA */ 2607 xdev->common.residue_granularity = 2608 DMA_RESIDUE_GRANULARITY_SEGMENT; 2609 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2610 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 2611 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 2612 } else { 2613 xdev->common.device_prep_interleaved_dma = 2614 xilinx_vdma_dma_prep_interleaved; 2615 } 2616 2617 platform_set_drvdata(pdev, xdev); 2618 2619 /* Initialize the channels */ 2620 for_each_child_of_node(node, child) { 2621 err = xilinx_dma_child_probe(xdev, child); 2622 if (err < 0) 2623 goto disable_clks; 2624 } 2625 2626 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2627 for (i = 0; i < xdev->nr_channels; i++) 2628 if (xdev->chan[i]) 2629 xdev->chan[i]->num_frms = num_frames; 2630 } 2631 2632 /* Register the DMA engine with the core */ 2633 dma_async_device_register(&xdev->common); 2634 2635 err = of_dma_controller_register(node, of_dma_xilinx_xlate, 2636 xdev); 2637 if (err < 0) { 2638 dev_err(&pdev->dev, "Unable to register DMA to DT\n"); 2639 dma_async_device_unregister(&xdev->common); 2640 goto error; 2641 } 2642 2643 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 2644 2645 return 0; 2646 2647 disable_clks: 2648 xdma_disable_allclks(xdev); 2649 error: 2650 for (i = 0; i < xdev->nr_channels; i++) 2651 if (xdev->chan[i]) 2652 xilinx_dma_chan_remove(xdev->chan[i]); 2653 2654 return err; 2655 } 2656 2657 /** 2658 * xilinx_dma_remove - Driver remove function 2659 * @pdev: Pointer to the platform_device structure 2660 * 2661 * Return: Always '0' 2662 */ 2663 static int xilinx_dma_remove(struct platform_device *pdev) 2664 { 2665 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); 2666 int i; 2667 2668 of_dma_controller_free(pdev->dev.of_node); 2669 2670 dma_async_device_unregister(&xdev->common); 2671 2672 for (i = 0; i < xdev->nr_channels; i++) 2673 if (xdev->chan[i]) 2674 xilinx_dma_chan_remove(xdev->chan[i]); 2675 2676 xdma_disable_allclks(xdev); 2677 2678 return 0; 2679 } 2680 2681 static struct platform_driver xilinx_vdma_driver = { 2682 .driver = { 2683 .name = "xilinx-vdma", 2684 .of_match_table = xilinx_dma_of_ids, 2685 }, 2686 .probe = xilinx_dma_probe, 2687 .remove = xilinx_dma_remove, 2688 }; 2689 2690 module_platform_driver(xilinx_vdma_driver); 2691 2692 MODULE_AUTHOR("Xilinx, Inc."); 2693 MODULE_DESCRIPTION("Xilinx VDMA driver"); 2694 MODULE_LICENSE("GPL v2"); 2695