// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with multiple independently configurable channels.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_VSIZE_MASK			GENMASK(12, 0)
#define XILINX_DMA_REG_HSIZE			0x0004
#define XILINX_DMA_HSIZE_MASK			GENMASK(15, 0)

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1
#define XILINX_DMA_DFAULT_ADDRWIDTH		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
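 * (C_FLUSH_ON_FSYNC is a synthesis-time option; the driver learns the
 * configured value from the "xlnx,flush-fsync" device tree property and
 * keeps it in xilinx_dma_device::flush_on_fsync.)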
156 */ 157 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ 158 (XILINX_DMA_DMASR_SOF_LATE_ERR | \ 159 XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 160 XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 161 XILINX_DMA_DMASR_DMA_INT_ERR) 162 163 /* Axi VDMA Flush on Fsync bits */ 164 #define XILINX_DMA_FLUSH_S2MM 3 165 #define XILINX_DMA_FLUSH_MM2S 2 166 #define XILINX_DMA_FLUSH_BOTH 1 167 168 /* Delay loop counter to prevent hardware failure */ 169 #define XILINX_DMA_LOOP_COUNT 1000000 170 171 /* AXI DMA Specific Registers/Offsets */ 172 #define XILINX_DMA_REG_SRCDSTADDR 0x18 173 #define XILINX_DMA_REG_BTT 0x28 174 175 /* AXI DMA Specific Masks/Bit fields */ 176 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8 177 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23 178 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 179 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 180 #define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24) 181 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) 182 #define XILINX_DMA_CR_COALESCE_SHIFT 16 183 #define XILINX_DMA_CR_DELAY_SHIFT 24 184 #define XILINX_DMA_BD_SOP BIT(27) 185 #define XILINX_DMA_BD_EOP BIT(26) 186 #define XILINX_DMA_BD_COMP_MASK BIT(31) 187 #define XILINX_DMA_COALESCE_MAX 255 188 #define XILINX_DMA_NUM_DESCS 512 189 #define XILINX_DMA_NUM_APP_WORDS 5 190 191 /* AXI CDMA Specific Registers/Offsets */ 192 #define XILINX_CDMA_REG_SRCADDR 0x18 193 #define XILINX_CDMA_REG_DSTADDR 0x20 194 195 /* AXI CDMA Specific Masks */ 196 #define XILINX_CDMA_CR_SGMODE BIT(3) 197 198 #define xilinx_prep_dma_addr_t(addr) \ 199 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) 200 201 /* AXI MCDMA Specific Registers/Offsets */ 202 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 203 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 204 #define XILINX_MCDMA_CHEN_OFFSET 0x0008 205 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 206 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 207 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 208 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) 209 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) 210 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) 211 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) 212 213 /* AXI MCDMA Specific Masks/Shifts */ 214 #define XILINX_MCDMA_COALESCE_SHIFT 16 215 #define XILINX_MCDMA_COALESCE_MAX 24 216 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) 217 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) 218 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) 219 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) 220 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) 221 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) 222 #define XILINX_MCDMA_BD_EOP BIT(30) 223 #define XILINX_MCDMA_BD_SOP BIT(31) 224 225 /** 226 * struct xilinx_vdma_desc_hw - Hardware Descriptor 227 * @next_desc: Next Descriptor Pointer @0x00 228 * @pad1: Reserved @0x04 229 * @buf_addr: Buffer address @0x08 230 * @buf_addr_msb: MSB of Buffer address @0x0C 231 * @vsize: Vertical Size @0x10 232 * @hsize: Horizontal Size @0x14 233 * @stride: Number of bytes between the first 234 * pixels of each horizontal line @0x18 235 */ 236 struct xilinx_vdma_desc_hw { 237 u32 next_desc; 238 u32 pad1; 239 u32 buf_addr; 240 u32 buf_addr_msb; 241 u32 vsize; 242 u32 hsize; 243 u32 stride; 244 } __aligned(64); 245 246 /** 247 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 248 * @next_desc: Next Descriptor Pointer @0x00 249 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 250 * @buf_addr: Buffer address @0x08 251 * @buf_addr_msb: MSB of Buffer address @0x0C 252 * @reserved1: Reserved @0x10 253 * @reserved2: 
Reserved @0x14 254 * @control: Control field @0x18 255 * @status: Status field @0x1C 256 * @app: APP Fields @0x20 - 0x30 257 */ 258 struct xilinx_axidma_desc_hw { 259 u32 next_desc; 260 u32 next_desc_msb; 261 u32 buf_addr; 262 u32 buf_addr_msb; 263 u32 reserved1; 264 u32 reserved2; 265 u32 control; 266 u32 status; 267 u32 app[XILINX_DMA_NUM_APP_WORDS]; 268 } __aligned(64); 269 270 /** 271 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA 272 * @next_desc: Next Descriptor Pointer @0x00 273 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 274 * @buf_addr: Buffer address @0x08 275 * @buf_addr_msb: MSB of Buffer address @0x0C 276 * @rsvd: Reserved field @0x10 277 * @control: Control Information field @0x14 278 * @status: Status field @0x18 279 * @sideband_status: Status of sideband signals @0x1C 280 * @app: APP Fields @0x20 - 0x30 281 */ 282 struct xilinx_aximcdma_desc_hw { 283 u32 next_desc; 284 u32 next_desc_msb; 285 u32 buf_addr; 286 u32 buf_addr_msb; 287 u32 rsvd; 288 u32 control; 289 u32 status; 290 u32 sideband_status; 291 u32 app[XILINX_DMA_NUM_APP_WORDS]; 292 } __aligned(64); 293 294 /** 295 * struct xilinx_cdma_desc_hw - Hardware Descriptor 296 * @next_desc: Next Descriptor Pointer @0x00 297 * @next_desc_msb: Next Descriptor Pointer MSB @0x04 298 * @src_addr: Source address @0x08 299 * @src_addr_msb: Source address MSB @0x0C 300 * @dest_addr: Destination address @0x10 301 * @dest_addr_msb: Destination address MSB @0x14 302 * @control: Control field @0x18 303 * @status: Status field @0x1C 304 */ 305 struct xilinx_cdma_desc_hw { 306 u32 next_desc; 307 u32 next_desc_msb; 308 u32 src_addr; 309 u32 src_addr_msb; 310 u32 dest_addr; 311 u32 dest_addr_msb; 312 u32 control; 313 u32 status; 314 } __aligned(64); 315 316 /** 317 * struct xilinx_vdma_tx_segment - Descriptor segment 318 * @hw: Hardware descriptor 319 * @node: Node in the descriptor segments list 320 * @phys: Physical address of segment 321 */ 322 struct xilinx_vdma_tx_segment { 323 struct xilinx_vdma_desc_hw hw; 324 struct list_head node; 325 dma_addr_t phys; 326 } __aligned(64); 327 328 /** 329 * struct xilinx_axidma_tx_segment - Descriptor segment 330 * @hw: Hardware descriptor 331 * @node: Node in the descriptor segments list 332 * @phys: Physical address of segment 333 */ 334 struct xilinx_axidma_tx_segment { 335 struct xilinx_axidma_desc_hw hw; 336 struct list_head node; 337 dma_addr_t phys; 338 } __aligned(64); 339 340 /** 341 * struct xilinx_aximcdma_tx_segment - Descriptor segment 342 * @hw: Hardware descriptor 343 * @node: Node in the descriptor segments list 344 * @phys: Physical address of segment 345 */ 346 struct xilinx_aximcdma_tx_segment { 347 struct xilinx_aximcdma_desc_hw hw; 348 struct list_head node; 349 dma_addr_t phys; 350 } __aligned(64); 351 352 /** 353 * struct xilinx_cdma_tx_segment - Descriptor segment 354 * @hw: Hardware descriptor 355 * @node: Node in the descriptor segments list 356 * @phys: Physical address of segment 357 */ 358 struct xilinx_cdma_tx_segment { 359 struct xilinx_cdma_desc_hw hw; 360 struct list_head node; 361 dma_addr_t phys; 362 } __aligned(64); 363 364 /** 365 * struct xilinx_dma_tx_descriptor - Per Transaction structure 366 * @async_tx: Async transaction descriptor 367 * @segments: TX segments list 368 * @node: Node in the channel descriptors list 369 * @cyclic: Check for cyclic transfers. 370 * @err: Whether the descriptor has an error. 
371 * @residue: Residue of the completed descriptor 372 */ 373 struct xilinx_dma_tx_descriptor { 374 struct dma_async_tx_descriptor async_tx; 375 struct list_head segments; 376 struct list_head node; 377 bool cyclic; 378 bool err; 379 u32 residue; 380 }; 381 382 /** 383 * struct xilinx_dma_chan - Driver specific DMA channel structure 384 * @xdev: Driver specific device structure 385 * @ctrl_offset: Control registers offset 386 * @desc_offset: TX descriptor registers offset 387 * @lock: Descriptor operation lock 388 * @pending_list: Descriptors waiting 389 * @active_list: Descriptors ready to submit 390 * @done_list: Complete descriptors 391 * @free_seg_list: Free descriptors 392 * @common: DMA common channel 393 * @desc_pool: Descriptors pool 394 * @dev: The dma device 395 * @irq: Channel IRQ 396 * @id: Channel ID 397 * @direction: Transfer direction 398 * @num_frms: Number of frames 399 * @has_sg: Support scatter transfers 400 * @cyclic: Check for cyclic transfers. 401 * @genlock: Support genlock mode 402 * @err: Channel has errors 403 * @idle: Check for channel idle 404 * @terminating: Check for channel being synchronized by user 405 * @tasklet: Cleanup work after irq 406 * @config: Device configuration info 407 * @flush_on_fsync: Flush on Frame sync 408 * @desc_pendingcount: Descriptor pending count 409 * @ext_addr: Indicates 64 bit addressing is supported by dma channel 410 * @desc_submitcount: Descriptor h/w submitted count 411 * @seg_v: Statically allocated segments base 412 * @seg_mv: Statically allocated segments base for MCDMA 413 * @seg_p: Physical allocated segments base 414 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 415 * @cyclic_seg_p: Physical allocated segments base for cyclic dma 416 * @start_transfer: Differentiate b/w DMA IP's transfer 417 * @stop_transfer: Differentiate b/w DMA IP's quiesce 418 * @tdest: TDEST value for mcdma 419 * @has_vflip: S2MM vertical flip 420 * @irq_delay: Interrupt delay timeout 421 */ 422 struct xilinx_dma_chan { 423 struct xilinx_dma_device *xdev; 424 u32 ctrl_offset; 425 u32 desc_offset; 426 spinlock_t lock; 427 struct list_head pending_list; 428 struct list_head active_list; 429 struct list_head done_list; 430 struct list_head free_seg_list; 431 struct dma_chan common; 432 struct dma_pool *desc_pool; 433 struct device *dev; 434 int irq; 435 int id; 436 enum dma_transfer_direction direction; 437 int num_frms; 438 bool has_sg; 439 bool cyclic; 440 bool genlock; 441 bool err; 442 bool idle; 443 bool terminating; 444 struct tasklet_struct tasklet; 445 struct xilinx_vdma_config config; 446 bool flush_on_fsync; 447 u32 desc_pendingcount; 448 bool ext_addr; 449 u32 desc_submitcount; 450 struct xilinx_axidma_tx_segment *seg_v; 451 struct xilinx_aximcdma_tx_segment *seg_mv; 452 dma_addr_t seg_p; 453 struct xilinx_axidma_tx_segment *cyclic_seg_v; 454 dma_addr_t cyclic_seg_p; 455 void (*start_transfer)(struct xilinx_dma_chan *chan); 456 int (*stop_transfer)(struct xilinx_dma_chan *chan); 457 u16 tdest; 458 bool has_vflip; 459 u8 irq_delay; 460 }; 461 462 /** 463 * enum xdma_ip_type - DMA IP type. 464 * 465 * @XDMA_TYPE_AXIDMA: Axi dma ip. 466 * @XDMA_TYPE_CDMA: Axi cdma ip. 467 * @XDMA_TYPE_VDMA: Axi vdma ip. 468 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip. 
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
	const int max_channels;
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA AXI4-Lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
	bool has_axistream_connected;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
				  val, cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not a multiple
 * of 64 bits (e.g. 0x5c), the value is written as two separate 32-bit
 * accesses instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}

/**
 * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
 * @tx: async transaction descriptor
 * @payload_len: metadata payload length
 * @max_len: metadata max length
 * Return: The app field pointer.
 */
static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
					 size_t *payload_len, size_t *max_len)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_axidma_tx_segment *seg;

	*max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
	seg = list_first_entry(&desc->segments,
			       struct xilinx_axidma_tx_segment, node);
	return seg->hw.app;
}

static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
	.get_ptr = xilinx_dma_get_metadata_ptr,
};
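
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver using engine-provided descriptor metadata can reach the five APP
 * words of the first segment through the generic DMAengine helpers, e.g.
 *
 *	size_t payload_len, max_len;
 *	u32 *app;
 *
 *	app = dmaengine_desc_get_metadata_ptr(tx, &payload_len, &max_len);
 *	if (!IS_ERR(app))
 *		app[0] = 0xdeadbeef;	// hypothetical user field
 *
 * where "tx" is the descriptor returned by a prep call on this channel.
 */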

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
690 */ 691 static struct xilinx_cdma_tx_segment * 692 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 693 { 694 struct xilinx_cdma_tx_segment *segment; 695 dma_addr_t phys; 696 697 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 698 if (!segment) 699 return NULL; 700 701 segment->phys = phys; 702 703 return segment; 704 } 705 706 /** 707 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment 708 * @chan: Driver specific DMA channel 709 * 710 * Return: The allocated segment on success and NULL on failure. 711 */ 712 static struct xilinx_axidma_tx_segment * 713 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 714 { 715 struct xilinx_axidma_tx_segment *segment = NULL; 716 unsigned long flags; 717 718 spin_lock_irqsave(&chan->lock, flags); 719 if (!list_empty(&chan->free_seg_list)) { 720 segment = list_first_entry(&chan->free_seg_list, 721 struct xilinx_axidma_tx_segment, 722 node); 723 list_del(&segment->node); 724 } 725 spin_unlock_irqrestore(&chan->lock, flags); 726 727 if (!segment) 728 dev_dbg(chan->dev, "Could not find free tx segment\n"); 729 730 return segment; 731 } 732 733 /** 734 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment 735 * @chan: Driver specific DMA channel 736 * 737 * Return: The allocated segment on success and NULL on failure. 738 */ 739 static struct xilinx_aximcdma_tx_segment * 740 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 741 { 742 struct xilinx_aximcdma_tx_segment *segment = NULL; 743 unsigned long flags; 744 745 spin_lock_irqsave(&chan->lock, flags); 746 if (!list_empty(&chan->free_seg_list)) { 747 segment = list_first_entry(&chan->free_seg_list, 748 struct xilinx_aximcdma_tx_segment, 749 node); 750 list_del(&segment->node); 751 } 752 spin_unlock_irqrestore(&chan->lock, flags); 753 754 return segment; 755 } 756 757 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) 758 { 759 u32 next_desc = hw->next_desc; 760 u32 next_desc_msb = hw->next_desc_msb; 761 762 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); 763 764 hw->next_desc = next_desc; 765 hw->next_desc_msb = next_desc_msb; 766 } 767 768 static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) 769 { 770 u32 next_desc = hw->next_desc; 771 u32 next_desc_msb = hw->next_desc_msb; 772 773 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); 774 775 hw->next_desc = next_desc; 776 hw->next_desc_msb = next_desc_msb; 777 } 778 779 /** 780 * xilinx_dma_free_tx_segment - Free transaction segment 781 * @chan: Driver specific DMA channel 782 * @segment: DMA transaction segment 783 */ 784 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 785 struct xilinx_axidma_tx_segment *segment) 786 { 787 xilinx_dma_clean_hw_desc(&segment->hw); 788 789 list_add_tail(&segment->node, &chan->free_seg_list); 790 } 791 792 /** 793 * xilinx_mcdma_free_tx_segment - Free transaction segment 794 * @chan: Driver specific DMA channel 795 * @segment: DMA transaction segment 796 */ 797 static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, 798 struct xilinx_aximcdma_tx_segment * 799 segment) 800 { 801 xilinx_mcdma_clean_hw_desc(&segment->hw); 802 803 list_add_tail(&segment->node, &chan->free_seg_list); 804 } 805 806 /** 807 * xilinx_cdma_free_tx_segment - Free transaction segment 808 * @chan: Driver specific DMA channel 809 * @segment: DMA transaction segment 810 */ 811 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, 812 struct xilinx_cdma_tx_segment *segment) 813 { 
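	/*
	 * CDMA segments come from the channel's dma_pool, so freeing one
	 * simply returns it to the pool.
	 */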
814 dma_pool_free(chan->desc_pool, segment, segment->phys); 815 } 816 817 /** 818 * xilinx_vdma_free_tx_segment - Free transaction segment 819 * @chan: Driver specific DMA channel 820 * @segment: DMA transaction segment 821 */ 822 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, 823 struct xilinx_vdma_tx_segment *segment) 824 { 825 dma_pool_free(chan->desc_pool, segment, segment->phys); 826 } 827 828 /** 829 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor 830 * @chan: Driver specific DMA channel 831 * 832 * Return: The allocated descriptor on success and NULL on failure. 833 */ 834 static struct xilinx_dma_tx_descriptor * 835 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 836 { 837 struct xilinx_dma_tx_descriptor *desc; 838 839 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 840 if (!desc) 841 return NULL; 842 843 INIT_LIST_HEAD(&desc->segments); 844 845 return desc; 846 } 847 848 /** 849 * xilinx_dma_free_tx_descriptor - Free transaction descriptor 850 * @chan: Driver specific DMA channel 851 * @desc: DMA transaction descriptor 852 */ 853 static void 854 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 855 struct xilinx_dma_tx_descriptor *desc) 856 { 857 struct xilinx_vdma_tx_segment *segment, *next; 858 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 859 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 860 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; 861 862 if (!desc) 863 return; 864 865 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 866 list_for_each_entry_safe(segment, next, &desc->segments, node) { 867 list_del(&segment->node); 868 xilinx_vdma_free_tx_segment(chan, segment); 869 } 870 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 871 list_for_each_entry_safe(cdma_segment, cdma_next, 872 &desc->segments, node) { 873 list_del(&cdma_segment->node); 874 xilinx_cdma_free_tx_segment(chan, cdma_segment); 875 } 876 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 877 list_for_each_entry_safe(axidma_segment, axidma_next, 878 &desc->segments, node) { 879 list_del(&axidma_segment->node); 880 xilinx_dma_free_tx_segment(chan, axidma_segment); 881 } 882 } else { 883 list_for_each_entry_safe(aximcdma_segment, aximcdma_next, 884 &desc->segments, node) { 885 list_del(&aximcdma_segment->node); 886 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); 887 } 888 } 889 890 kfree(desc); 891 } 892 893 /* Required functions */ 894 895 /** 896 * xilinx_dma_free_desc_list - Free descriptors list 897 * @chan: Driver specific DMA channel 898 * @list: List to parse and delete the descriptor 899 */ 900 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 901 struct list_head *list) 902 { 903 struct xilinx_dma_tx_descriptor *desc, *next; 904 905 list_for_each_entry_safe(desc, next, list, node) { 906 list_del(&desc->node); 907 xilinx_dma_free_tx_descriptor(chan, desc); 908 } 909 } 910 911 /** 912 * xilinx_dma_free_descriptors - Free channel descriptors 913 * @chan: Driver specific DMA channel 914 */ 915 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 916 { 917 unsigned long flags; 918 919 spin_lock_irqsave(&chan->lock, flags); 920 921 xilinx_dma_free_desc_list(chan, &chan->pending_list); 922 xilinx_dma_free_desc_list(chan, &chan->done_list); 923 xilinx_dma_free_desc_list(chan, &chan->active_list); 924 925 spin_unlock_irqrestore(&chan->lock, flags); 926 } 927 928 /** 929 * xilinx_dma_free_chan_resources - Free channel resources 
930 * @dchan: DMA channel 931 */ 932 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 933 { 934 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 935 unsigned long flags; 936 937 dev_dbg(chan->dev, "Free all channel resources.\n"); 938 939 xilinx_dma_free_descriptors(chan); 940 941 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 942 spin_lock_irqsave(&chan->lock, flags); 943 INIT_LIST_HEAD(&chan->free_seg_list); 944 spin_unlock_irqrestore(&chan->lock, flags); 945 946 /* Free memory that is allocated for BD */ 947 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 948 XILINX_DMA_NUM_DESCS, chan->seg_v, 949 chan->seg_p); 950 951 /* Free Memory that is allocated for cyclic DMA Mode */ 952 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), 953 chan->cyclic_seg_v, chan->cyclic_seg_p); 954 } 955 956 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 957 spin_lock_irqsave(&chan->lock, flags); 958 INIT_LIST_HEAD(&chan->free_seg_list); 959 spin_unlock_irqrestore(&chan->lock, flags); 960 961 /* Free memory that is allocated for BD */ 962 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * 963 XILINX_DMA_NUM_DESCS, chan->seg_mv, 964 chan->seg_p); 965 } 966 967 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && 968 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { 969 dma_pool_destroy(chan->desc_pool); 970 chan->desc_pool = NULL; 971 } 972 973 } 974 975 /** 976 * xilinx_dma_get_residue - Compute residue for a given descriptor 977 * @chan: Driver specific dma channel 978 * @desc: dma transaction descriptor 979 * 980 * Return: The number of residue bytes for the descriptor. 981 */ 982 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, 983 struct xilinx_dma_tx_descriptor *desc) 984 { 985 struct xilinx_cdma_tx_segment *cdma_seg; 986 struct xilinx_axidma_tx_segment *axidma_seg; 987 struct xilinx_aximcdma_tx_segment *aximcdma_seg; 988 struct xilinx_cdma_desc_hw *cdma_hw; 989 struct xilinx_axidma_desc_hw *axidma_hw; 990 struct xilinx_aximcdma_desc_hw *aximcdma_hw; 991 struct list_head *entry; 992 u32 residue = 0; 993 994 list_for_each(entry, &desc->segments) { 995 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 996 cdma_seg = list_entry(entry, 997 struct xilinx_cdma_tx_segment, 998 node); 999 cdma_hw = &cdma_seg->hw; 1000 residue += (cdma_hw->control - cdma_hw->status) & 1001 chan->xdev->max_buffer_len; 1002 } else if (chan->xdev->dma_config->dmatype == 1003 XDMA_TYPE_AXIDMA) { 1004 axidma_seg = list_entry(entry, 1005 struct xilinx_axidma_tx_segment, 1006 node); 1007 axidma_hw = &axidma_seg->hw; 1008 residue += (axidma_hw->control - axidma_hw->status) & 1009 chan->xdev->max_buffer_len; 1010 } else { 1011 aximcdma_seg = 1012 list_entry(entry, 1013 struct xilinx_aximcdma_tx_segment, 1014 node); 1015 aximcdma_hw = &aximcdma_seg->hw; 1016 residue += 1017 (aximcdma_hw->control - aximcdma_hw->status) & 1018 chan->xdev->max_buffer_len; 1019 } 1020 } 1021 1022 return residue; 1023 } 1024 1025 static u32 1026 xilinx_dma_get_residue_axidma_direct_s2mm(struct xilinx_dma_chan *chan, 1027 struct xilinx_dma_tx_descriptor *desc) 1028 { 1029 struct xilinx_axidma_tx_segment *seg; 1030 struct xilinx_axidma_desc_hw *hw; 1031 u32 finished_len; 1032 1033 finished_len = dma_ctrl_read(chan, XILINX_DMA_REG_BTT); 1034 1035 seg = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, 1036 node); 1037 1038 hw = &seg->hw; 1039 1040 return hw->control - finished_len; 1041 } 1042 1043 /** 1044 * xilinx_dma_chan_handle_cyclic - Cyclic dma 
callback 1045 * @chan: Driver specific dma channel 1046 * @desc: dma transaction descriptor 1047 * @flags: flags for spin lock 1048 */ 1049 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 1050 struct xilinx_dma_tx_descriptor *desc, 1051 unsigned long *flags) 1052 { 1053 struct dmaengine_desc_callback cb; 1054 1055 dmaengine_desc_get_callback(&desc->async_tx, &cb); 1056 if (dmaengine_desc_callback_valid(&cb)) { 1057 spin_unlock_irqrestore(&chan->lock, *flags); 1058 dmaengine_desc_callback_invoke(&cb, NULL); 1059 spin_lock_irqsave(&chan->lock, *flags); 1060 } 1061 } 1062 1063 /** 1064 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 1065 * @chan: Driver specific DMA channel 1066 */ 1067 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 1068 { 1069 struct xilinx_dma_tx_descriptor *desc, *next; 1070 unsigned long flags; 1071 1072 spin_lock_irqsave(&chan->lock, flags); 1073 1074 list_for_each_entry_safe(desc, next, &chan->done_list, node) { 1075 struct dmaengine_result result; 1076 1077 if (desc->cyclic) { 1078 xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 1079 break; 1080 } 1081 1082 /* Remove from the list of running transactions */ 1083 list_del(&desc->node); 1084 1085 if (unlikely(desc->err)) { 1086 if (chan->direction == DMA_DEV_TO_MEM) 1087 result.result = DMA_TRANS_READ_FAILED; 1088 else 1089 result.result = DMA_TRANS_WRITE_FAILED; 1090 } else { 1091 result.result = DMA_TRANS_NOERROR; 1092 } 1093 1094 result.residue = desc->residue; 1095 1096 /* Run the link descriptor callback function */ 1097 spin_unlock_irqrestore(&chan->lock, flags); 1098 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); 1099 spin_lock_irqsave(&chan->lock, flags); 1100 1101 /* Run any dependencies, then free the descriptor */ 1102 dma_run_dependencies(&desc->async_tx); 1103 xilinx_dma_free_tx_descriptor(chan, desc); 1104 1105 /* 1106 * While we ran a callback the user called a terminate function, 1107 * which takes care of cleaning up any remaining descriptors 1108 */ 1109 if (chan->terminating) 1110 break; 1111 } 1112 1113 spin_unlock_irqrestore(&chan->lock, flags); 1114 } 1115 1116 /** 1117 * xilinx_dma_do_tasklet - Schedule completion tasklet 1118 * @t: Pointer to the Xilinx DMA channel structure 1119 */ 1120 static void xilinx_dma_do_tasklet(struct tasklet_struct *t) 1121 { 1122 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet); 1123 1124 xilinx_dma_chan_desc_cleanup(chan); 1125 } 1126 1127 /** 1128 * xilinx_dma_alloc_chan_resources - Allocate channel resources 1129 * @dchan: DMA channel 1130 * 1131 * Return: '0' on success and failure value on error 1132 */ 1133 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 1134 { 1135 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1136 int i; 1137 1138 /* Has this channel already been allocated? */ 1139 if (chan->desc_pool) 1140 return 0; 1141 1142 /* 1143 * We need the descriptor to be aligned to 64bytes 1144 * for meeting Xilinx VDMA specification requirement. 1145 */ 1146 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 1147 /* Allocate the buffer descriptors. 
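		 * The descriptors allocated below are linked into a circular
		 * chain through their next_desc/next_desc_msb fields, so the
		 * engine can walk them in scatter-gather mode without further
		 * driver intervention.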
*/ 1148 chan->seg_v = dma_alloc_coherent(chan->dev, 1149 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, 1150 &chan->seg_p, GFP_KERNEL); 1151 if (!chan->seg_v) { 1152 dev_err(chan->dev, 1153 "unable to allocate channel %d descriptors\n", 1154 chan->id); 1155 return -ENOMEM; 1156 } 1157 /* 1158 * For cyclic DMA mode we need to program the tail Descriptor 1159 * register with a value which is not a part of the BD chain 1160 * so allocating a desc segment during channel allocation for 1161 * programming tail descriptor. 1162 */ 1163 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, 1164 sizeof(*chan->cyclic_seg_v), 1165 &chan->cyclic_seg_p, 1166 GFP_KERNEL); 1167 if (!chan->cyclic_seg_v) { 1168 dev_err(chan->dev, 1169 "unable to allocate desc segment for cyclic DMA\n"); 1170 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 1171 XILINX_DMA_NUM_DESCS, chan->seg_v, 1172 chan->seg_p); 1173 return -ENOMEM; 1174 } 1175 chan->cyclic_seg_v->phys = chan->cyclic_seg_p; 1176 1177 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { 1178 chan->seg_v[i].hw.next_desc = 1179 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 1180 ((i + 1) % XILINX_DMA_NUM_DESCS)); 1181 chan->seg_v[i].hw.next_desc_msb = 1182 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 1183 ((i + 1) % XILINX_DMA_NUM_DESCS)); 1184 chan->seg_v[i].phys = chan->seg_p + 1185 sizeof(*chan->seg_v) * i; 1186 list_add_tail(&chan->seg_v[i].node, 1187 &chan->free_seg_list); 1188 } 1189 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 1190 /* Allocate the buffer descriptors. */ 1191 chan->seg_mv = dma_alloc_coherent(chan->dev, 1192 sizeof(*chan->seg_mv) * 1193 XILINX_DMA_NUM_DESCS, 1194 &chan->seg_p, GFP_KERNEL); 1195 if (!chan->seg_mv) { 1196 dev_err(chan->dev, 1197 "unable to allocate channel %d descriptors\n", 1198 chan->id); 1199 return -ENOMEM; 1200 } 1201 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { 1202 chan->seg_mv[i].hw.next_desc = 1203 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * 1204 ((i + 1) % XILINX_DMA_NUM_DESCS)); 1205 chan->seg_mv[i].hw.next_desc_msb = 1206 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * 1207 ((i + 1) % XILINX_DMA_NUM_DESCS)); 1208 chan->seg_mv[i].phys = chan->seg_p + 1209 sizeof(*chan->seg_mv) * i; 1210 list_add_tail(&chan->seg_mv[i].node, 1211 &chan->free_seg_list); 1212 } 1213 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1214 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 1215 chan->dev, 1216 sizeof(struct xilinx_cdma_tx_segment), 1217 __alignof__(struct xilinx_cdma_tx_segment), 1218 0); 1219 } else { 1220 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 1221 chan->dev, 1222 sizeof(struct xilinx_vdma_tx_segment), 1223 __alignof__(struct xilinx_vdma_tx_segment), 1224 0); 1225 } 1226 1227 if (!chan->desc_pool && 1228 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && 1229 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { 1230 dev_err(chan->dev, 1231 "unable to allocate channel %d descriptor pool\n", 1232 chan->id); 1233 return -ENOMEM; 1234 } 1235 1236 dma_cookie_init(dchan); 1237 1238 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 1239 /* For AXI DMA resetting once channel will reset the 1240 * other channel as well so enable the interrupts here. 
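		 * They are enabled again in xilinx_dma_chan_reset() after an
		 * explicit channel reset.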
1241 */ 1242 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1243 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1244 } 1245 1246 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 1247 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1248 XILINX_CDMA_CR_SGMODE); 1249 1250 return 0; 1251 } 1252 1253 /** 1254 * xilinx_dma_calc_copysize - Calculate the amount of data to copy 1255 * @chan: Driver specific DMA channel 1256 * @size: Total data that needs to be copied 1257 * @done: Amount of data that has been already copied 1258 * 1259 * Return: Amount of data that has to be copied 1260 */ 1261 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, 1262 int size, int done) 1263 { 1264 size_t copy; 1265 1266 copy = min_t(size_t, size - done, 1267 chan->xdev->max_buffer_len); 1268 1269 if ((copy + done < size) && 1270 chan->xdev->common.copy_align) { 1271 /* 1272 * If this is not the last descriptor, make sure 1273 * the next one will be properly aligned 1274 */ 1275 copy = rounddown(copy, 1276 (1 << chan->xdev->common.copy_align)); 1277 } 1278 return copy; 1279 } 1280 1281 /** 1282 * xilinx_dma_tx_status - Get DMA transaction status 1283 * @dchan: DMA channel 1284 * @cookie: Transaction identifier 1285 * @txstate: Transaction state 1286 * 1287 * Return: DMA transaction status 1288 */ 1289 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 1290 dma_cookie_t cookie, 1291 struct dma_tx_state *txstate) 1292 { 1293 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1294 struct xilinx_dma_tx_descriptor *desc; 1295 enum dma_status ret; 1296 unsigned long flags; 1297 u32 residue = 0; 1298 1299 ret = dma_cookie_status(dchan, cookie, txstate); 1300 if (ret == DMA_COMPLETE || !txstate) 1301 return ret; 1302 1303 spin_lock_irqsave(&chan->lock, flags); 1304 if (!list_empty(&chan->active_list)) { 1305 desc = list_last_entry(&chan->active_list, 1306 struct xilinx_dma_tx_descriptor, node); 1307 /* 1308 * VDMA and simple mode do not support residue reporting, so the 1309 * residue field will always be 0. 
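		 * In SG mode the residue is accumulated per hardware segment
		 * by xilinx_dma_get_residue() as
		 * (control - status) & max_buffer_len, i.e. the requested
		 * length minus the bytes the engine has already transferred.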
1310 */ 1311 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) 1312 residue = xilinx_dma_get_residue(chan, desc); 1313 } 1314 spin_unlock_irqrestore(&chan->lock, flags); 1315 1316 dma_set_residue(txstate, residue); 1317 1318 return ret; 1319 } 1320 1321 /** 1322 * xilinx_dma_stop_transfer - Halt DMA channel 1323 * @chan: Driver specific DMA channel 1324 * 1325 * Return: '0' on success and failure value on error 1326 */ 1327 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 1328 { 1329 u32 val; 1330 1331 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 1332 1333 /* Wait for the hardware to halt */ 1334 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1335 val & XILINX_DMA_DMASR_HALTED, 0, 1336 XILINX_DMA_LOOP_COUNT); 1337 } 1338 1339 /** 1340 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 1341 * @chan: Driver specific DMA channel 1342 * 1343 * Return: '0' on success and failure value on error 1344 */ 1345 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 1346 { 1347 u32 val; 1348 1349 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1350 val & XILINX_DMA_DMASR_IDLE, 0, 1351 XILINX_DMA_LOOP_COUNT); 1352 } 1353 1354 /** 1355 * xilinx_dma_start - Start DMA channel 1356 * @chan: Driver specific DMA channel 1357 */ 1358 static void xilinx_dma_start(struct xilinx_dma_chan *chan) 1359 { 1360 int err; 1361 u32 val; 1362 1363 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 1364 1365 /* Wait for the hardware to start */ 1366 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1367 !(val & XILINX_DMA_DMASR_HALTED), 0, 1368 XILINX_DMA_LOOP_COUNT); 1369 1370 if (err) { 1371 dev_err(chan->dev, "Cannot start channel %p: %x\n", 1372 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1373 1374 chan->err = true; 1375 } 1376 } 1377 1378 /** 1379 * xilinx_vdma_start_transfer - Starts VDMA transfer 1380 * @chan: Driver specific channel struct pointer 1381 */ 1382 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1383 { 1384 struct xilinx_vdma_config *config = &chan->config; 1385 struct xilinx_dma_tx_descriptor *desc; 1386 u32 reg, j; 1387 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1388 int i = 0; 1389 1390 /* This function was invoked with lock held */ 1391 if (chan->err) 1392 return; 1393 1394 if (!chan->idle) 1395 return; 1396 1397 if (list_empty(&chan->pending_list)) 1398 return; 1399 1400 desc = list_first_entry(&chan->pending_list, 1401 struct xilinx_dma_tx_descriptor, node); 1402 1403 /* Configure the hardware using info in the config structure */ 1404 if (chan->has_vflip) { 1405 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); 1406 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; 1407 reg |= config->vflip_en; 1408 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, 1409 reg); 1410 } 1411 1412 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1413 1414 if (config->frm_cnt_en) 1415 reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 1416 else 1417 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 1418 1419 /* If not parking, enable circular mode */ 1420 if (config->park) 1421 reg &= ~XILINX_DMA_DMACR_CIRC_EN; 1422 else 1423 reg |= XILINX_DMA_DMACR_CIRC_EN; 1424 1425 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1426 1427 if (config->park) { 1428 j = chan->desc_submitcount; 1429 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); 1430 if (chan->direction == DMA_MEM_TO_DEV) { 1431 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; 1432 reg |= j << 
XILINX_DMA_PARK_PTR_RD_REF_SHIFT; 1433 } else { 1434 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; 1435 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; 1436 } 1437 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg); 1438 } 1439 1440 /* Start the hardware */ 1441 xilinx_dma_start(chan); 1442 1443 if (chan->err) 1444 return; 1445 1446 /* Start the transfer */ 1447 if (chan->desc_submitcount < chan->num_frms) 1448 i = chan->desc_submitcount; 1449 1450 list_for_each_entry(segment, &desc->segments, node) { 1451 if (chan->ext_addr) 1452 vdma_desc_write_64(chan, 1453 XILINX_VDMA_REG_START_ADDRESS_64(i++), 1454 segment->hw.buf_addr, 1455 segment->hw.buf_addr_msb); 1456 else 1457 vdma_desc_write(chan, 1458 XILINX_VDMA_REG_START_ADDRESS(i++), 1459 segment->hw.buf_addr); 1460 1461 last = segment; 1462 } 1463 1464 if (!last) 1465 return; 1466 1467 /* HW expects these parameters to be same for one transaction */ 1468 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 1469 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 1470 last->hw.stride); 1471 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 1472 1473 chan->desc_submitcount++; 1474 chan->desc_pendingcount--; 1475 list_move_tail(&desc->node, &chan->active_list); 1476 if (chan->desc_submitcount == chan->num_frms) 1477 chan->desc_submitcount = 0; 1478 1479 chan->idle = false; 1480 } 1481 1482 /** 1483 * xilinx_cdma_start_transfer - Starts cdma transfer 1484 * @chan: Driver specific channel struct pointer 1485 */ 1486 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 1487 { 1488 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1489 struct xilinx_cdma_tx_segment *tail_segment; 1490 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 1491 1492 if (chan->err) 1493 return; 1494 1495 if (!chan->idle) 1496 return; 1497 1498 if (list_empty(&chan->pending_list)) 1499 return; 1500 1501 head_desc = list_first_entry(&chan->pending_list, 1502 struct xilinx_dma_tx_descriptor, node); 1503 tail_desc = list_last_entry(&chan->pending_list, 1504 struct xilinx_dma_tx_descriptor, node); 1505 tail_segment = list_last_entry(&tail_desc->segments, 1506 struct xilinx_cdma_tx_segment, node); 1507 1508 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 1509 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 1510 ctrl_reg |= chan->desc_pendingcount << 1511 XILINX_DMA_CR_COALESCE_SHIFT; 1512 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 1513 } 1514 1515 if (chan->has_sg) { 1516 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 1517 XILINX_CDMA_CR_SGMODE); 1518 1519 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1520 XILINX_CDMA_CR_SGMODE); 1521 1522 xilinx_write(chan, XILINX_DMA_REG_CURDESC, 1523 head_desc->async_tx.phys); 1524 1525 /* Update tail ptr register which will start the transfer */ 1526 xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 1527 tail_segment->phys); 1528 } else { 1529 /* In simple mode */ 1530 struct xilinx_cdma_tx_segment *segment; 1531 struct xilinx_cdma_desc_hw *hw; 1532 1533 segment = list_first_entry(&head_desc->segments, 1534 struct xilinx_cdma_tx_segment, 1535 node); 1536 1537 hw = &segment->hw; 1538 1539 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, 1540 xilinx_prep_dma_addr_t(hw->src_addr)); 1541 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, 1542 xilinx_prep_dma_addr_t(hw->dest_addr)); 1543 1544 /* Start the transfer */ 1545 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1546 hw->control & chan->xdev->max_buffer_len); 1547 } 1548 1549 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1550 chan->desc_pendingcount = 0; 1551 
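	/* Descriptors are now in flight; mark the channel busy until the
	 * completion interrupt.
	 */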
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	if (!chan->idle)
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
			XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);
	reg &= ~XILINX_DMA_CR_DELAY_MAX;
	reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
			     xilinx_prep_dma_addr_t(hw->buf_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_aximcdma_tx_segment *tail_segment;
	u32 reg;

	/*
	 * The lock has already been taken by the calling function, so there
	 * is no need to take it here again.
1644 */ 1645 1646 if (chan->err) 1647 return; 1648 1649 if (!chan->idle) 1650 return; 1651 1652 if (list_empty(&chan->pending_list)) 1653 return; 1654 1655 head_desc = list_first_entry(&chan->pending_list, 1656 struct xilinx_dma_tx_descriptor, node); 1657 tail_desc = list_last_entry(&chan->pending_list, 1658 struct xilinx_dma_tx_descriptor, node); 1659 tail_segment = list_last_entry(&tail_desc->segments, 1660 struct xilinx_aximcdma_tx_segment, node); 1661 1662 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); 1663 1664 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { 1665 reg &= ~XILINX_MCDMA_COALESCE_MASK; 1666 reg |= chan->desc_pendingcount << 1667 XILINX_MCDMA_COALESCE_SHIFT; 1668 } 1669 1670 reg |= XILINX_MCDMA_IRQ_ALL_MASK; 1671 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); 1672 1673 /* Program current descriptor */ 1674 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), 1675 head_desc->async_tx.phys); 1676 1677 /* Program channel enable register */ 1678 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); 1679 reg |= BIT(chan->tdest); 1680 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); 1681 1682 /* Start the fetch of BDs for the channel */ 1683 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); 1684 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; 1685 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); 1686 1687 xilinx_dma_start(chan); 1688 1689 if (chan->err) 1690 return; 1691 1692 /* Start the transfer */ 1693 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), 1694 tail_segment->phys); 1695 1696 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1697 chan->desc_pendingcount = 0; 1698 chan->idle = false; 1699 } 1700 1701 /** 1702 * xilinx_dma_issue_pending - Issue pending transactions 1703 * @dchan: DMA channel 1704 */ 1705 static void xilinx_dma_issue_pending(struct dma_chan *dchan) 1706 { 1707 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1708 unsigned long flags; 1709 1710 spin_lock_irqsave(&chan->lock, flags); 1711 chan->start_transfer(chan); 1712 spin_unlock_irqrestore(&chan->lock, flags); 1713 } 1714 1715 /** 1716 * xilinx_dma_device_config - Configure the DMA channel 1717 * @dchan: DMA channel 1718 * @config: channel configuration 1719 * 1720 * Return: 0 always. 
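 *
 * This driver ignores struct dma_slave_config; the callback is provided so
 * that generic clients can call dmaengine_slave_config() without error.
 * VDMA-specific runtime parameters are instead set through
 * xilinx_vdma_channel_set_config(), e.g. (illustrative):
 *
 *	struct xilinx_vdma_config cfg = { .park = 1, .frm_dly = 0 };
 *
 *	xilinx_vdma_channel_set_config(dchan, &cfg);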
1721 */ 1722 static int xilinx_dma_device_config(struct dma_chan *dchan, 1723 struct dma_slave_config *config) 1724 { 1725 return 0; 1726 } 1727 1728 /** 1729 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 1730 * @chan : xilinx DMA channel 1731 * 1732 * CONTEXT: hardirq 1733 */ 1734 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 1735 { 1736 struct xilinx_dma_tx_descriptor *desc, *next; 1737 1738 /* This function was invoked with lock held */ 1739 if (list_empty(&chan->active_list)) 1740 return; 1741 1742 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1743 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 1744 struct xilinx_axidma_tx_segment *seg; 1745 1746 seg = list_last_entry(&desc->segments, 1747 struct xilinx_axidma_tx_segment, node); 1748 if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg) 1749 break; 1750 } 1751 if (chan->has_sg && chan->xdev->dma_config->dmatype != 1752 XDMA_TYPE_VDMA) 1753 desc->residue = xilinx_dma_get_residue(chan, desc); 1754 else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA && 1755 chan->direction == DMA_DEV_TO_MEM && !chan->has_sg) 1756 desc->residue = xilinx_dma_get_residue_axidma_direct_s2mm(chan, desc); 1757 else 1758 desc->residue = 0; 1759 desc->err = chan->err; 1760 1761 list_del(&desc->node); 1762 if (!desc->cyclic) 1763 dma_cookie_complete(&desc->async_tx); 1764 list_add_tail(&desc->node, &chan->done_list); 1765 } 1766 } 1767 1768 /** 1769 * xilinx_dma_reset - Reset DMA channel 1770 * @chan: Driver specific DMA channel 1771 * 1772 * Return: '0' on success and failure value on error 1773 */ 1774 static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 1775 { 1776 int err; 1777 u32 tmp; 1778 1779 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 1780 1781 /* Wait for the hardware to finish reset */ 1782 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 1783 !(tmp & XILINX_DMA_DMACR_RESET), 0, 1784 XILINX_DMA_LOOP_COUNT); 1785 1786 if (err) { 1787 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1788 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 1789 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 1790 return -ETIMEDOUT; 1791 } 1792 1793 chan->err = false; 1794 chan->idle = true; 1795 chan->desc_pendingcount = 0; 1796 chan->desc_submitcount = 0; 1797 1798 return err; 1799 } 1800 1801 /** 1802 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 1803 * @chan: Driver specific DMA channel 1804 * 1805 * Return: '0' on success and failure value on error 1806 */ 1807 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 1808 { 1809 int err; 1810 1811 /* Reset VDMA */ 1812 err = xilinx_dma_reset(chan); 1813 if (err) 1814 return err; 1815 1816 /* Enable interrupts */ 1817 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 1818 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1819 1820 return 0; 1821 } 1822 1823 /** 1824 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler 1825 * @irq: IRQ number 1826 * @data: Pointer to the Xilinx MCDMA channel structure 1827 * 1828 * Return: IRQ_HANDLED/IRQ_NONE 1829 */ 1830 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) 1831 { 1832 struct xilinx_dma_chan *chan = data; 1833 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; 1834 1835 if (chan->direction == DMA_DEV_TO_MEM) 1836 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; 1837 else 1838 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; 1839 1840 /* Read the channel id raising the interrupt*/ 1841 chan_sermask = dma_ctrl_read(chan, 
ser_offset); 1842 chan_id = ffs(chan_sermask); 1843 1844 if (!chan_id) 1845 return IRQ_NONE; 1846 1847 if (chan->direction == DMA_DEV_TO_MEM) 1848 chan_offset = chan->xdev->dma_config->max_channels / 2; 1849 1850 chan_offset = chan_offset + (chan_id - 1); 1851 chan = chan->xdev->chan[chan_offset]; 1852 /* Read the status and ack the interrupts. */ 1853 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); 1854 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) 1855 return IRQ_NONE; 1856 1857 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), 1858 status & XILINX_MCDMA_IRQ_ALL_MASK); 1859 1860 if (status & XILINX_MCDMA_IRQ_ERR_MASK) { 1861 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", 1862 chan, 1863 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), 1864 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET 1865 (chan->tdest)), 1866 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET 1867 (chan->tdest))); 1868 chan->err = true; 1869 } 1870 1871 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { 1872 /* 1873 * Device takes too long to do the transfer when user requires 1874 * responsiveness. 1875 */ 1876 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1877 } 1878 1879 if (status & XILINX_MCDMA_IRQ_IOC_MASK) { 1880 spin_lock(&chan->lock); 1881 xilinx_dma_complete_descriptor(chan); 1882 chan->idle = true; 1883 chan->start_transfer(chan); 1884 spin_unlock(&chan->lock); 1885 } 1886 1887 tasklet_hi_schedule(&chan->tasklet); 1888 return IRQ_HANDLED; 1889 } 1890 1891 /** 1892 * xilinx_dma_irq_handler - DMA Interrupt handler 1893 * @irq: IRQ number 1894 * @data: Pointer to the Xilinx DMA channel structure 1895 * 1896 * Return: IRQ_HANDLED/IRQ_NONE 1897 */ 1898 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 1899 { 1900 struct xilinx_dma_chan *chan = data; 1901 u32 status; 1902 1903 /* Read the status and ack the interrupts. */ 1904 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 1905 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 1906 return IRQ_NONE; 1907 1908 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1909 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1910 1911 if (status & XILINX_DMA_DMASR_ERR_IRQ) { 1912 /* 1913 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1914 * error is recoverable, ignore it. Otherwise flag the error. 1915 * 1916 * Only recoverable errors can be cleared in the DMASR register, 1917 * make sure not to write to other error bits to 1. 
1918 */ 1919 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 1920 1921 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 1922 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 1923 1924 if (!chan->flush_on_fsync || 1925 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 1926 dev_err(chan->dev, 1927 "Channel %p has errors %x, cdr %x tdr %x\n", 1928 chan, errors, 1929 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 1930 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 1931 chan->err = true; 1932 } 1933 } 1934 1935 if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ | 1936 XILINX_DMA_DMASR_DLY_CNT_IRQ)) { 1937 spin_lock(&chan->lock); 1938 xilinx_dma_complete_descriptor(chan); 1939 chan->idle = true; 1940 chan->start_transfer(chan); 1941 spin_unlock(&chan->lock); 1942 } 1943 1944 tasklet_schedule(&chan->tasklet); 1945 return IRQ_HANDLED; 1946 } 1947 1948 /** 1949 * append_desc_queue - Queuing descriptor 1950 * @chan: Driver specific dma channel 1951 * @desc: dma transaction descriptor 1952 */ 1953 static void append_desc_queue(struct xilinx_dma_chan *chan, 1954 struct xilinx_dma_tx_descriptor *desc) 1955 { 1956 struct xilinx_vdma_tx_segment *tail_segment; 1957 struct xilinx_dma_tx_descriptor *tail_desc; 1958 struct xilinx_axidma_tx_segment *axidma_tail_segment; 1959 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment; 1960 struct xilinx_cdma_tx_segment *cdma_tail_segment; 1961 1962 if (list_empty(&chan->pending_list)) 1963 goto append; 1964 1965 /* 1966 * Add the hardware descriptor to the chain of hardware descriptors 1967 * that already exists in memory. 1968 */ 1969 tail_desc = list_last_entry(&chan->pending_list, 1970 struct xilinx_dma_tx_descriptor, node); 1971 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 1972 tail_segment = list_last_entry(&tail_desc->segments, 1973 struct xilinx_vdma_tx_segment, 1974 node); 1975 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1976 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1977 cdma_tail_segment = list_last_entry(&tail_desc->segments, 1978 struct xilinx_cdma_tx_segment, 1979 node); 1980 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1981 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 1982 axidma_tail_segment = list_last_entry(&tail_desc->segments, 1983 struct xilinx_axidma_tx_segment, 1984 node); 1985 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1986 } else { 1987 aximcdma_tail_segment = 1988 list_last_entry(&tail_desc->segments, 1989 struct xilinx_aximcdma_tx_segment, 1990 node); 1991 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1992 } 1993 1994 /* 1995 * Add the software descriptor and all children to the list 1996 * of pending transactions 1997 */ 1998 append: 1999 list_add_tail(&desc->node, &chan->pending_list); 2000 chan->desc_pendingcount++; 2001 2002 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 2003 && unlikely(chan->desc_pendingcount > chan->num_frms)) { 2004 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 2005 chan->desc_pendingcount = chan->num_frms; 2006 } 2007 } 2008 2009 /** 2010 * xilinx_dma_tx_submit - Submit DMA transaction 2011 * @tx: Async transaction descriptor 2012 * 2013 * Return: cookie value on success and failure value on error 2014 */ 2015 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 2016 { 2017 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 2018 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 2019 dma_cookie_t cookie; 2020 unsigned long flags; 
2021 int err; 2022 2023 if (chan->cyclic) { 2024 xilinx_dma_free_tx_descriptor(chan, desc); 2025 return -EBUSY; 2026 } 2027 2028 if (chan->err) { 2029 /* 2030 * If reset fails, need to hard reset the system. 2031 * Channel is no longer functional 2032 */ 2033 err = xilinx_dma_chan_reset(chan); 2034 if (err < 0) 2035 return err; 2036 } 2037 2038 spin_lock_irqsave(&chan->lock, flags); 2039 2040 cookie = dma_cookie_assign(tx); 2041 2042 /* Put this transaction onto the tail of the pending queue */ 2043 append_desc_queue(chan, desc); 2044 2045 if (desc->cyclic) 2046 chan->cyclic = true; 2047 2048 chan->terminating = false; 2049 2050 spin_unlock_irqrestore(&chan->lock, flags); 2051 2052 return cookie; 2053 } 2054 2055 /** 2056 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 2057 * DMA_SLAVE transaction 2058 * @dchan: DMA channel 2059 * @xt: Interleaved template pointer 2060 * @flags: transfer ack flags 2061 * 2062 * Return: Async transaction descriptor on success and NULL on failure 2063 */ 2064 static struct dma_async_tx_descriptor * 2065 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 2066 struct dma_interleaved_template *xt, 2067 unsigned long flags) 2068 { 2069 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2070 struct xilinx_dma_tx_descriptor *desc; 2071 struct xilinx_vdma_tx_segment *segment; 2072 struct xilinx_vdma_desc_hw *hw; 2073 2074 if (!is_slave_direction(xt->dir)) 2075 return NULL; 2076 2077 if (!xt->numf || !xt->sgl[0].size) 2078 return NULL; 2079 2080 if (xt->numf & ~XILINX_DMA_VSIZE_MASK || 2081 xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK) 2082 return NULL; 2083 2084 if (xt->frame_size != 1) 2085 return NULL; 2086 2087 /* Allocate a transaction descriptor. */ 2088 desc = xilinx_dma_alloc_tx_descriptor(chan); 2089 if (!desc) 2090 return NULL; 2091 2092 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2093 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2094 async_tx_ack(&desc->async_tx); 2095 2096 /* Allocate the link descriptor from DMA pool */ 2097 segment = xilinx_vdma_alloc_tx_segment(chan); 2098 if (!segment) 2099 goto error; 2100 2101 /* Fill in the hardware descriptor */ 2102 hw = &segment->hw; 2103 hw->vsize = xt->numf; 2104 hw->hsize = xt->sgl[0].size; 2105 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 2106 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 2107 hw->stride |= chan->config.frm_dly << 2108 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 2109 2110 if (xt->dir != DMA_MEM_TO_DEV) { 2111 if (chan->ext_addr) { 2112 hw->buf_addr = lower_32_bits(xt->dst_start); 2113 hw->buf_addr_msb = upper_32_bits(xt->dst_start); 2114 } else { 2115 hw->buf_addr = xt->dst_start; 2116 } 2117 } else { 2118 if (chan->ext_addr) { 2119 hw->buf_addr = lower_32_bits(xt->src_start); 2120 hw->buf_addr_msb = upper_32_bits(xt->src_start); 2121 } else { 2122 hw->buf_addr = xt->src_start; 2123 } 2124 } 2125 2126 /* Insert the segment into the descriptor segments list. */ 2127 list_add_tail(&segment->node, &desc->segments); 2128 2129 /* Link the last hardware descriptor with the first. 
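 *
 * Editor's note (illustrative, not from the original source): a VDMA
 * client describes one video frame per descriptor through a
 * dma_interleaved_template.  For a hypothetical 1920x1080 frame with
 * 4 bytes per pixel and a padded line stride of "stride" bytes:
 *
 *	xt->numf        = 1080;               becomes hw->vsize
 *	xt->frame_size  = 1;                  required by this driver
 *	xt->sgl[0].size = 1920 * 4;           becomes hw->hsize
 *	xt->sgl[0].icg  = stride - 1920 * 4;  icg + size becomes hw->stride
 *	xt->dir         = DMA_DEV_TO_MEM;
 *	xt->dst_start   = frame_dma_addr;     start address of the frame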
*/ 2130 segment = list_first_entry(&desc->segments, 2131 struct xilinx_vdma_tx_segment, node); 2132 desc->async_tx.phys = segment->phys; 2133 2134 return &desc->async_tx; 2135 2136 error: 2137 xilinx_dma_free_tx_descriptor(chan, desc); 2138 return NULL; 2139 } 2140 2141 /** 2142 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 2143 * @dchan: DMA channel 2144 * @dma_dst: destination address 2145 * @dma_src: source address 2146 * @len: transfer length 2147 * @flags: transfer ack flags 2148 * 2149 * Return: Async transaction descriptor on success and NULL on failure 2150 */ 2151 static struct dma_async_tx_descriptor * 2152 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 2153 dma_addr_t dma_src, size_t len, unsigned long flags) 2154 { 2155 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2156 struct xilinx_dma_tx_descriptor *desc; 2157 struct xilinx_cdma_tx_segment *segment; 2158 struct xilinx_cdma_desc_hw *hw; 2159 2160 if (!len || len > chan->xdev->max_buffer_len) 2161 return NULL; 2162 2163 desc = xilinx_dma_alloc_tx_descriptor(chan); 2164 if (!desc) 2165 return NULL; 2166 2167 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2168 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2169 2170 /* Allocate the link descriptor from DMA pool */ 2171 segment = xilinx_cdma_alloc_tx_segment(chan); 2172 if (!segment) 2173 goto error; 2174 2175 hw = &segment->hw; 2176 hw->control = len; 2177 hw->src_addr = dma_src; 2178 hw->dest_addr = dma_dst; 2179 if (chan->ext_addr) { 2180 hw->src_addr_msb = upper_32_bits(dma_src); 2181 hw->dest_addr_msb = upper_32_bits(dma_dst); 2182 } 2183 2184 /* Insert the segment into the descriptor segments list. */ 2185 list_add_tail(&segment->node, &desc->segments); 2186 2187 desc->async_tx.phys = segment->phys; 2188 hw->next_desc = segment->phys; 2189 2190 return &desc->async_tx; 2191 2192 error: 2193 xilinx_dma_free_tx_descriptor(chan, desc); 2194 return NULL; 2195 } 2196 2197 /** 2198 * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE 2199 * transaction from DMA vectors 2200 * @dchan: DMA channel 2201 * @vecs: Array of DMA vectors that should be transferred 2202 * @nb: number of entries in @vecs 2203 * @direction: DMA direction 2204 * @flags: transfer ack flags 2205 * 2206 * Return: Async transaction descriptor on success and NULL on failure 2207 */ 2208 static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec( 2209 struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb, 2210 enum dma_transfer_direction direction, unsigned long flags) 2211 { 2212 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2213 struct xilinx_dma_tx_descriptor *desc; 2214 struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL; 2215 size_t copy; 2216 size_t sg_used; 2217 unsigned int i; 2218 2219 if (!is_slave_direction(direction) || direction != chan->direction) 2220 return NULL; 2221 2222 desc = xilinx_dma_alloc_tx_descriptor(chan); 2223 if (!desc) 2224 return NULL; 2225 2226 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2227 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2228 2229 /* Build transactions using information from DMA vectors */ 2230 for (i = 0; i < nb; i++) { 2231 sg_used = 0; 2232 2233 /* Loop until the entire dma_vec entry is used */ 2234 while (sg_used < vecs[i].len) { 2235 struct xilinx_axidma_desc_hw *hw; 2236 2237 /* Get a free segment */ 2238 segment = xilinx_axidma_alloc_tx_segment(chan); 2239 if (!segment) 2240 goto error; 2241 2242 /* 2243 * 
Calculate the maximum number of bytes to transfer, 2244 * making sure it is less than the hw limit 2245 */ 2246 copy = xilinx_dma_calc_copysize(chan, vecs[i].len, 2247 sg_used); 2248 hw = &segment->hw; 2249 2250 /* Fill in the descriptor */ 2251 xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0); 2252 hw->control = copy; 2253 2254 if (prev) 2255 prev->hw.next_desc = segment->phys; 2256 2257 prev = segment; 2258 sg_used += copy; 2259 2260 /* 2261 * Insert the segment into the descriptor segments 2262 * list. 2263 */ 2264 list_add_tail(&segment->node, &desc->segments); 2265 } 2266 } 2267 2268 head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); 2269 desc->async_tx.phys = head->phys; 2270 2271 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2272 if (chan->direction == DMA_MEM_TO_DEV) { 2273 segment->hw.control |= XILINX_DMA_BD_SOP; 2274 segment = list_last_entry(&desc->segments, 2275 struct xilinx_axidma_tx_segment, 2276 node); 2277 segment->hw.control |= XILINX_DMA_BD_EOP; 2278 } 2279 2280 if (chan->xdev->has_axistream_connected) 2281 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops; 2282 2283 return &desc->async_tx; 2284 2285 error: 2286 xilinx_dma_free_tx_descriptor(chan, desc); 2287 return NULL; 2288 } 2289 2290 /** 2291 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 2292 * @dchan: DMA channel 2293 * @sgl: scatterlist to transfer to/from 2294 * @sg_len: number of entries in @scatterlist 2295 * @direction: DMA direction 2296 * @flags: transfer ack flags 2297 * @context: APP words of the descriptor 2298 * 2299 * Return: Async transaction descriptor on success and NULL on failure 2300 */ 2301 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 2302 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 2303 enum dma_transfer_direction direction, unsigned long flags, 2304 void *context) 2305 { 2306 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2307 struct xilinx_dma_tx_descriptor *desc; 2308 struct xilinx_axidma_tx_segment *segment = NULL; 2309 u32 *app_w = (u32 *)context; 2310 struct scatterlist *sg; 2311 size_t copy; 2312 size_t sg_used; 2313 unsigned int i; 2314 2315 if (!is_slave_direction(direction)) 2316 return NULL; 2317 2318 /* Allocate a transaction descriptor. */ 2319 desc = xilinx_dma_alloc_tx_descriptor(chan); 2320 if (!desc) 2321 return NULL; 2322 2323 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2324 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2325 2326 /* Build transactions using information in the scatter gather list */ 2327 for_each_sg(sgl, sg, sg_len, i) { 2328 sg_used = 0; 2329 2330 /* Loop until the entire scatterlist entry is used */ 2331 while (sg_used < sg_dma_len(sg)) { 2332 struct xilinx_axidma_desc_hw *hw; 2333 2334 /* Get a free segment */ 2335 segment = xilinx_axidma_alloc_tx_segment(chan); 2336 if (!segment) 2337 goto error; 2338 2339 /* 2340 * Calculate the maximum number of bytes to transfer, 2341 * making sure it is less than the hw limit 2342 */ 2343 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), 2344 sg_used); 2345 hw = &segment->hw; 2346 2347 /* Fill in the descriptor */ 2348 xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 2349 sg_used, 0); 2350 2351 hw->control = copy; 2352 2353 if (chan->direction == DMA_MEM_TO_DEV) { 2354 if (app_w) 2355 memcpy(hw->app, app_w, sizeof(u32) * 2356 XILINX_DMA_NUM_APP_WORDS); 2357 } 2358 2359 sg_used += copy; 2360 2361 /* 2362 * Insert the segment into the descriptor segments 2363 * list. 
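 *
 * Editor's note (illustrative sketch, not part of the original
 * source): a client normally maps a scatterlist and reaches this
 * callback through the generic helper, e.g. for a hypothetical
 * sg_table "sgt" owned by device "dev":
 *
 *	dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The generic helper passes @context as NULL, so the APP words are only
 * programmed when a caller invokes this callback directly with a
 * non-NULL context array.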
2364 */ 2365 list_add_tail(&segment->node, &desc->segments); 2366 } 2367 } 2368 2369 segment = list_first_entry(&desc->segments, 2370 struct xilinx_axidma_tx_segment, node); 2371 desc->async_tx.phys = segment->phys; 2372 2373 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2374 if (chan->direction == DMA_MEM_TO_DEV) { 2375 segment->hw.control |= XILINX_DMA_BD_SOP; 2376 segment = list_last_entry(&desc->segments, 2377 struct xilinx_axidma_tx_segment, 2378 node); 2379 segment->hw.control |= XILINX_DMA_BD_EOP; 2380 } 2381 2382 if (chan->xdev->has_axistream_connected) 2383 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops; 2384 2385 return &desc->async_tx; 2386 2387 error: 2388 xilinx_dma_free_tx_descriptor(chan, desc); 2389 return NULL; 2390 } 2391 2392 /** 2393 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 2394 * @dchan: DMA channel 2395 * @buf_addr: Physical address of the buffer 2396 * @buf_len: Total length of the cyclic buffers 2397 * @period_len: length of individual cyclic buffer 2398 * @direction: DMA direction 2399 * @flags: transfer ack flags 2400 * 2401 * Return: Async transaction descriptor on success and NULL on failure 2402 */ 2403 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 2404 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 2405 size_t period_len, enum dma_transfer_direction direction, 2406 unsigned long flags) 2407 { 2408 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2409 struct xilinx_dma_tx_descriptor *desc; 2410 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 2411 size_t copy, sg_used; 2412 unsigned int num_periods; 2413 int i; 2414 u32 reg; 2415 2416 if (!period_len) 2417 return NULL; 2418 2419 num_periods = buf_len / period_len; 2420 2421 if (!num_periods) 2422 return NULL; 2423 2424 if (!is_slave_direction(direction)) 2425 return NULL; 2426 2427 /* Allocate a transaction descriptor. */ 2428 desc = xilinx_dma_alloc_tx_descriptor(chan); 2429 if (!desc) 2430 return NULL; 2431 2432 chan->direction = direction; 2433 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2434 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2435 2436 for (i = 0; i < num_periods; ++i) { 2437 sg_used = 0; 2438 2439 while (sg_used < period_len) { 2440 struct xilinx_axidma_desc_hw *hw; 2441 2442 /* Get a free segment */ 2443 segment = xilinx_axidma_alloc_tx_segment(chan); 2444 if (!segment) 2445 goto error; 2446 2447 /* 2448 * Calculate the maximum number of bytes to transfer, 2449 * making sure it is less than the hw limit 2450 */ 2451 copy = xilinx_dma_calc_copysize(chan, period_len, 2452 sg_used); 2453 hw = &segment->hw; 2454 xilinx_axidma_buf(chan, hw, buf_addr, sg_used, 2455 period_len * i); 2456 hw->control = copy; 2457 2458 if (prev) 2459 prev->hw.next_desc = segment->phys; 2460 2461 prev = segment; 2462 sg_used += copy; 2463 2464 /* 2465 * Insert the segment into the descriptor segments 2466 * list. 
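 *
 * Editor's note (illustrative sketch, not part of the original
 * source): cyclic transfers suit ring buffers that are refilled or
 * drained period by period.  With a hypothetical ring of 8 periods of
 * 4 KiB each at DMA address "ring_dma":
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, 8 * SZ_4K, SZ_4K,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_period_done;   typically called once per period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The transfer then loops until dmaengine_terminate_sync() is called.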
2467 */ 2468 list_add_tail(&segment->node, &desc->segments); 2469 } 2470 } 2471 2472 head_segment = list_first_entry(&desc->segments, 2473 struct xilinx_axidma_tx_segment, node); 2474 desc->async_tx.phys = head_segment->phys; 2475 2476 desc->cyclic = true; 2477 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2478 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 2479 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 2480 2481 segment = list_last_entry(&desc->segments, 2482 struct xilinx_axidma_tx_segment, 2483 node); 2484 segment->hw.next_desc = (u32) head_segment->phys; 2485 2486 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2487 if (direction == DMA_MEM_TO_DEV) { 2488 head_segment->hw.control |= XILINX_DMA_BD_SOP; 2489 segment->hw.control |= XILINX_DMA_BD_EOP; 2490 } 2491 2492 return &desc->async_tx; 2493 2494 error: 2495 xilinx_dma_free_tx_descriptor(chan, desc); 2496 return NULL; 2497 } 2498 2499 /** 2500 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 2501 * @dchan: DMA channel 2502 * @sgl: scatterlist to transfer to/from 2503 * @sg_len: number of entries in @scatterlist 2504 * @direction: DMA direction 2505 * @flags: transfer ack flags 2506 * @context: APP words of the descriptor 2507 * 2508 * Return: Async transaction descriptor on success and NULL on failure 2509 */ 2510 static struct dma_async_tx_descriptor * 2511 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 2512 unsigned int sg_len, 2513 enum dma_transfer_direction direction, 2514 unsigned long flags, void *context) 2515 { 2516 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2517 struct xilinx_dma_tx_descriptor *desc; 2518 struct xilinx_aximcdma_tx_segment *segment = NULL; 2519 u32 *app_w = (u32 *)context; 2520 struct scatterlist *sg; 2521 size_t copy; 2522 size_t sg_used; 2523 unsigned int i; 2524 2525 if (!is_slave_direction(direction)) 2526 return NULL; 2527 2528 /* Allocate a transaction descriptor. */ 2529 desc = xilinx_dma_alloc_tx_descriptor(chan); 2530 if (!desc) 2531 return NULL; 2532 2533 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2534 desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2535 2536 /* Build transactions using information in the scatter gather list */ 2537 for_each_sg(sgl, sg, sg_len, i) { 2538 sg_used = 0; 2539 2540 /* Loop until the entire scatterlist entry is used */ 2541 while (sg_used < sg_dma_len(sg)) { 2542 struct xilinx_aximcdma_desc_hw *hw; 2543 2544 /* Get a free segment */ 2545 segment = xilinx_aximcdma_alloc_tx_segment(chan); 2546 if (!segment) 2547 goto error; 2548 2549 /* 2550 * Calculate the maximum number of bytes to transfer, 2551 * making sure it is less than the hw limit 2552 */ 2553 copy = min_t(size_t, sg_dma_len(sg) - sg_used, 2554 chan->xdev->max_buffer_len); 2555 hw = &segment->hw; 2556 2557 /* Fill in the descriptor */ 2558 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), 2559 sg_used); 2560 hw->control = copy; 2561 2562 if (chan->direction == DMA_MEM_TO_DEV && app_w) { 2563 memcpy(hw->app, app_w, sizeof(u32) * 2564 XILINX_DMA_NUM_APP_WORDS); 2565 } 2566 2567 sg_used += copy; 2568 /* 2569 * Insert the segment into the descriptor segments 2570 * list. 
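 *
 * Editor's note (not part of the original source): each MCDMA channel
 * is programmed through its own register bank indexed by chan->tdest
 * (see the XILINX_MCDMA_CHAN_*_OFFSET accessors), and a consumer picks
 * a particular channel through the index in its "dmas" specifier,
 * which of_dma_xilinx_xlate() maps to xdev->chan[index].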
2571 */ 2572 list_add_tail(&segment->node, &desc->segments); 2573 } 2574 } 2575 2576 segment = list_first_entry(&desc->segments, 2577 struct xilinx_aximcdma_tx_segment, node); 2578 desc->async_tx.phys = segment->phys; 2579 2580 /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2581 if (chan->direction == DMA_MEM_TO_DEV) { 2582 segment->hw.control |= XILINX_MCDMA_BD_SOP; 2583 segment = list_last_entry(&desc->segments, 2584 struct xilinx_aximcdma_tx_segment, 2585 node); 2586 segment->hw.control |= XILINX_MCDMA_BD_EOP; 2587 } 2588 2589 return &desc->async_tx; 2590 2591 error: 2592 xilinx_dma_free_tx_descriptor(chan, desc); 2593 2594 return NULL; 2595 } 2596 2597 /** 2598 * xilinx_dma_terminate_all - Halt the channel and free descriptors 2599 * @dchan: Driver specific DMA Channel pointer 2600 * 2601 * Return: '0' always. 2602 */ 2603 static int xilinx_dma_terminate_all(struct dma_chan *dchan) 2604 { 2605 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2606 u32 reg; 2607 int err; 2608 2609 if (!chan->cyclic) { 2610 err = chan->stop_transfer(chan); 2611 if (err) { 2612 dev_err(chan->dev, "Cannot stop channel %p: %x\n", 2613 chan, dma_ctrl_read(chan, 2614 XILINX_DMA_REG_DMASR)); 2615 chan->err = true; 2616 } 2617 } 2618 2619 xilinx_dma_chan_reset(chan); 2620 /* Remove and free all of the descriptors in the lists */ 2621 chan->terminating = true; 2622 xilinx_dma_free_descriptors(chan); 2623 chan->idle = true; 2624 2625 if (chan->cyclic) { 2626 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2627 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 2628 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 2629 chan->cyclic = false; 2630 } 2631 2632 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 2633 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 2634 XILINX_CDMA_CR_SGMODE); 2635 2636 return 0; 2637 } 2638 2639 static void xilinx_dma_synchronize(struct dma_chan *dchan) 2640 { 2641 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2642 2643 tasklet_kill(&chan->tasklet); 2644 } 2645 2646 /** 2647 * xilinx_vdma_channel_set_config - Configure VDMA channel 2648 * Run-time configuration for Axi VDMA, supports: 2649 * . halt the channel 2650 * . configure interrupt coalescing and inter-packet delay threshold 2651 * . start/stop parking 2652 * . 
enable genlock 2653 * 2654 * @dchan: DMA channel 2655 * @cfg: VDMA device configuration pointer 2656 * 2657 * Return: '0' on success and failure value on error 2658 */ 2659 int xilinx_vdma_channel_set_config(struct dma_chan *dchan, 2660 struct xilinx_vdma_config *cfg) 2661 { 2662 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2663 u32 dmacr; 2664 2665 if (cfg->reset) 2666 return xilinx_dma_chan_reset(chan); 2667 2668 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 2669 2670 chan->config.frm_dly = cfg->frm_dly; 2671 chan->config.park = cfg->park; 2672 2673 /* genlock settings */ 2674 chan->config.gen_lock = cfg->gen_lock; 2675 chan->config.master = cfg->master; 2676 2677 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; 2678 if (cfg->gen_lock && chan->genlock) { 2679 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; 2680 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; 2681 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; 2682 } 2683 2684 chan->config.frm_cnt_en = cfg->frm_cnt_en; 2685 chan->config.vflip_en = cfg->vflip_en; 2686 2687 if (cfg->park) 2688 chan->config.park_frm = cfg->park_frm; 2689 else 2690 chan->config.park_frm = -1; 2691 2692 chan->config.coalesc = cfg->coalesc; 2693 chan->config.delay = cfg->delay; 2694 2695 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { 2696 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; 2697 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; 2698 chan->config.coalesc = cfg->coalesc; 2699 } 2700 2701 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { 2702 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; 2703 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; 2704 chan->config.delay = cfg->delay; 2705 } 2706 2707 /* FSync Source selection */ 2708 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; 2709 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; 2710 2711 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); 2712 2713 return 0; 2714 } 2715 EXPORT_SYMBOL(xilinx_vdma_channel_set_config); 2716 2717 /* ----------------------------------------------------------------------------- 2718 * Probe and remove 2719 */ 2720 2721 /** 2722 * xilinx_dma_chan_remove - Per Channel remove function 2723 * @chan: Driver specific DMA channel 2724 */ 2725 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) 2726 { 2727 /* Disable all interrupts */ 2728 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 2729 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 2730 2731 if (chan->irq > 0) 2732 free_irq(chan->irq, chan); 2733 2734 tasklet_kill(&chan->tasklet); 2735 2736 list_del(&chan->common.device_node); 2737 } 2738 2739 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2740 struct clk **tx_clk, struct clk **rx_clk, 2741 struct clk **sg_clk, struct clk **tmp_clk) 2742 { 2743 int err; 2744 2745 *tmp_clk = NULL; 2746 2747 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2748 if (IS_ERR(*axi_clk)) 2749 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); 2750 2751 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2752 if (IS_ERR(*tx_clk)) 2753 *tx_clk = NULL; 2754 2755 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2756 if (IS_ERR(*rx_clk)) 2757 *rx_clk = NULL; 2758 2759 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); 2760 if (IS_ERR(*sg_clk)) 2761 *sg_clk = NULL; 2762 2763 err = clk_prepare_enable(*axi_clk); 2764 if (err) { 2765 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2766 return err; 2767 } 2768 2769 err = clk_prepare_enable(*tx_clk); 2770 if (err) { 2771 dev_err(&pdev->dev, "failed to enable tx_clk 
(%d)\n", err); 2772 goto err_disable_axiclk; 2773 } 2774 2775 err = clk_prepare_enable(*rx_clk); 2776 if (err) { 2777 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 2778 goto err_disable_txclk; 2779 } 2780 2781 err = clk_prepare_enable(*sg_clk); 2782 if (err) { 2783 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); 2784 goto err_disable_rxclk; 2785 } 2786 2787 return 0; 2788 2789 err_disable_rxclk: 2790 clk_disable_unprepare(*rx_clk); 2791 err_disable_txclk: 2792 clk_disable_unprepare(*tx_clk); 2793 err_disable_axiclk: 2794 clk_disable_unprepare(*axi_clk); 2795 2796 return err; 2797 } 2798 2799 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2800 struct clk **dev_clk, struct clk **tmp_clk, 2801 struct clk **tmp1_clk, struct clk **tmp2_clk) 2802 { 2803 int err; 2804 2805 *tmp_clk = NULL; 2806 *tmp1_clk = NULL; 2807 *tmp2_clk = NULL; 2808 2809 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2810 if (IS_ERR(*axi_clk)) 2811 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); 2812 2813 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); 2814 if (IS_ERR(*dev_clk)) 2815 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n"); 2816 2817 err = clk_prepare_enable(*axi_clk); 2818 if (err) { 2819 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2820 return err; 2821 } 2822 2823 err = clk_prepare_enable(*dev_clk); 2824 if (err) { 2825 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); 2826 goto err_disable_axiclk; 2827 } 2828 2829 return 0; 2830 2831 err_disable_axiclk: 2832 clk_disable_unprepare(*axi_clk); 2833 2834 return err; 2835 } 2836 2837 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 2838 struct clk **tx_clk, struct clk **txs_clk, 2839 struct clk **rx_clk, struct clk **rxs_clk) 2840 { 2841 int err; 2842 2843 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 2844 if (IS_ERR(*axi_clk)) 2845 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); 2846 2847 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 2848 if (IS_ERR(*tx_clk)) 2849 *tx_clk = NULL; 2850 2851 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); 2852 if (IS_ERR(*txs_clk)) 2853 *txs_clk = NULL; 2854 2855 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 2856 if (IS_ERR(*rx_clk)) 2857 *rx_clk = NULL; 2858 2859 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); 2860 if (IS_ERR(*rxs_clk)) 2861 *rxs_clk = NULL; 2862 2863 err = clk_prepare_enable(*axi_clk); 2864 if (err) { 2865 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", 2866 err); 2867 return err; 2868 } 2869 2870 err = clk_prepare_enable(*tx_clk); 2871 if (err) { 2872 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 2873 goto err_disable_axiclk; 2874 } 2875 2876 err = clk_prepare_enable(*txs_clk); 2877 if (err) { 2878 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); 2879 goto err_disable_txclk; 2880 } 2881 2882 err = clk_prepare_enable(*rx_clk); 2883 if (err) { 2884 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 2885 goto err_disable_txsclk; 2886 } 2887 2888 err = clk_prepare_enable(*rxs_clk); 2889 if (err) { 2890 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); 2891 goto err_disable_rxclk; 2892 } 2893 2894 return 0; 2895 2896 err_disable_rxclk: 2897 clk_disable_unprepare(*rx_clk); 2898 err_disable_txsclk: 2899 clk_disable_unprepare(*txs_clk); 2900 err_disable_txclk: 2901 clk_disable_unprepare(*tx_clk); 2902 
err_disable_axiclk: 2903 clk_disable_unprepare(*axi_clk); 2904 2905 return err; 2906 } 2907 2908 static void xdma_disable_allclks(struct xilinx_dma_device *xdev) 2909 { 2910 clk_disable_unprepare(xdev->rxs_clk); 2911 clk_disable_unprepare(xdev->rx_clk); 2912 clk_disable_unprepare(xdev->txs_clk); 2913 clk_disable_unprepare(xdev->tx_clk); 2914 clk_disable_unprepare(xdev->axi_clk); 2915 } 2916 2917 /** 2918 * xilinx_dma_chan_probe - Per Channel Probing 2919 * It get channel features from the device tree entry and 2920 * initialize special channel handling routines 2921 * 2922 * @xdev: Driver specific device structure 2923 * @node: Device node 2924 * 2925 * Return: '0' on success and failure value on error 2926 */ 2927 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 2928 struct device_node *node) 2929 { 2930 struct xilinx_dma_chan *chan; 2931 bool has_dre = false; 2932 u32 value, width; 2933 int err; 2934 2935 /* Allocate and initialize the channel structure */ 2936 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); 2937 if (!chan) 2938 return -ENOMEM; 2939 2940 chan->dev = xdev->dev; 2941 chan->xdev = xdev; 2942 chan->desc_pendingcount = 0x0; 2943 chan->ext_addr = xdev->ext_addr; 2944 /* This variable ensures that descriptors are not 2945 * Submitted when dma engine is in progress. This variable is 2946 * Added to avoid polling for a bit in the status register to 2947 * Know dma state in the driver hot path. 2948 */ 2949 chan->idle = true; 2950 2951 spin_lock_init(&chan->lock); 2952 INIT_LIST_HEAD(&chan->pending_list); 2953 INIT_LIST_HEAD(&chan->done_list); 2954 INIT_LIST_HEAD(&chan->active_list); 2955 INIT_LIST_HEAD(&chan->free_seg_list); 2956 2957 /* Retrieve the channel properties from the device tree */ 2958 has_dre = of_property_read_bool(node, "xlnx,include-dre"); 2959 2960 of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay); 2961 2962 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); 2963 2964 err = of_property_read_u32(node, "xlnx,datawidth", &value); 2965 if (err) { 2966 dev_err(xdev->dev, "missing xlnx,datawidth property\n"); 2967 return err; 2968 } 2969 width = value >> 3; /* Convert bits to bytes */ 2970 2971 /* If data width is greater than 8 bytes, DRE is not in hw */ 2972 if (width > 8) 2973 has_dre = false; 2974 2975 if (!has_dre) 2976 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); 2977 2978 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || 2979 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || 2980 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 2981 chan->direction = DMA_MEM_TO_DEV; 2982 chan->id = xdev->mm2s_chan_id++; 2983 chan->tdest = chan->id; 2984 2985 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2986 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2987 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2988 chan->config.park = 1; 2989 2990 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 2991 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 2992 chan->flush_on_fsync = true; 2993 } 2994 } else if (of_device_is_compatible(node, 2995 "xlnx,axi-vdma-s2mm-channel") || 2996 of_device_is_compatible(node, 2997 "xlnx,axi-dma-s2mm-channel")) { 2998 chan->direction = DMA_DEV_TO_MEM; 2999 chan->id = xdev->s2mm_chan_id++; 3000 chan->tdest = chan->id - xdev->dma_config->max_channels / 2; 3001 chan->has_vflip = of_property_read_bool(node, 3002 "xlnx,enable-vert-flip"); 3003 if (chan->has_vflip) { 3004 chan->config.vflip_en = dma_read(chan, 3005 
XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & 3006 XILINX_VDMA_ENABLE_VERTICAL_FLIP; 3007 } 3008 3009 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) 3010 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; 3011 else 3012 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 3013 3014 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 3015 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 3016 chan->config.park = 1; 3017 3018 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 3019 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 3020 chan->flush_on_fsync = true; 3021 } 3022 } else { 3023 dev_err(xdev->dev, "Invalid channel compatible node\n"); 3024 return -EINVAL; 3025 } 3026 3027 xdev->common.directions |= chan->direction; 3028 3029 /* Request the interrupt */ 3030 chan->irq = of_irq_get(node, chan->tdest); 3031 if (chan->irq < 0) 3032 return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n"); 3033 err = request_irq(chan->irq, xdev->dma_config->irq_handler, 3034 IRQF_SHARED, "xilinx-dma-controller", chan); 3035 if (err) { 3036 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 3037 return err; 3038 } 3039 3040 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3041 chan->start_transfer = xilinx_dma_start_transfer; 3042 chan->stop_transfer = xilinx_dma_stop_transfer; 3043 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 3044 chan->start_transfer = xilinx_mcdma_start_transfer; 3045 chan->stop_transfer = xilinx_dma_stop_transfer; 3046 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 3047 chan->start_transfer = xilinx_cdma_start_transfer; 3048 chan->stop_transfer = xilinx_cdma_stop_transfer; 3049 } else { 3050 chan->start_transfer = xilinx_vdma_start_transfer; 3051 chan->stop_transfer = xilinx_dma_stop_transfer; 3052 } 3053 3054 /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */ 3055 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { 3056 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA || 3057 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & 3058 XILINX_DMA_DMASR_SG_MASK) 3059 chan->has_sg = true; 3060 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, 3061 str_enabled_disabled(chan->has_sg)); 3062 } 3063 3064 /* Initialize the tasklet */ 3065 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet); 3066 3067 /* 3068 * Initialize the DMA channel and add it to the DMA engine channels 3069 * list. 3070 */ 3071 chan->common.device = &xdev->common; 3072 3073 list_add_tail(&chan->common.device_node, &xdev->common.channels); 3074 xdev->chan[chan->id] = chan; 3075 3076 /* Reset the channel */ 3077 err = xilinx_dma_chan_reset(chan); 3078 if (err < 0) { 3079 dev_err(xdev->dev, "Reset channel failed\n"); 3080 return err; 3081 } 3082 3083 return 0; 3084 } 3085 3086 /** 3087 * xilinx_dma_child_probe - Per child node probe 3088 * It get number of dma-channels per child node from 3089 * device-tree and initializes all the channels. 3090 * 3091 * @xdev: Driver specific device structure 3092 * @node: Device node 3093 * 3094 * Return: '0' on success and failure value on error. 
3095 */ 3096 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 3097 struct device_node *node) 3098 { 3099 int ret, i; 3100 u32 nr_channels = 1; 3101 3102 ret = of_property_read_u32(node, "dma-channels", &nr_channels); 3103 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) 3104 dev_warn(xdev->dev, "missing dma-channels property\n"); 3105 3106 for (i = 0; i < nr_channels; i++) { 3107 ret = xilinx_dma_chan_probe(xdev, node); 3108 if (ret) 3109 return ret; 3110 } 3111 3112 return 0; 3113 } 3114 3115 /** 3116 * of_dma_xilinx_xlate - Translation function 3117 * @dma_spec: Pointer to DMA specifier as found in the device tree 3118 * @ofdma: Pointer to DMA controller data 3119 * 3120 * Return: DMA channel pointer on success and NULL on error 3121 */ 3122 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 3123 struct of_dma *ofdma) 3124 { 3125 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 3126 int chan_id = dma_spec->args[0]; 3127 3128 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id]) 3129 return NULL; 3130 3131 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 3132 } 3133 3134 static const struct xilinx_dma_config axidma_config = { 3135 .dmatype = XDMA_TYPE_AXIDMA, 3136 .clk_init = axidma_clk_init, 3137 .irq_handler = xilinx_dma_irq_handler, 3138 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, 3139 }; 3140 3141 static const struct xilinx_dma_config aximcdma_config = { 3142 .dmatype = XDMA_TYPE_AXIMCDMA, 3143 .clk_init = axidma_clk_init, 3144 .irq_handler = xilinx_mcdma_irq_handler, 3145 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE, 3146 }; 3147 static const struct xilinx_dma_config axicdma_config = { 3148 .dmatype = XDMA_TYPE_CDMA, 3149 .clk_init = axicdma_clk_init, 3150 .irq_handler = xilinx_dma_irq_handler, 3151 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE, 3152 }; 3153 3154 static const struct xilinx_dma_config axivdma_config = { 3155 .dmatype = XDMA_TYPE_VDMA, 3156 .clk_init = axivdma_clk_init, 3157 .irq_handler = xilinx_dma_irq_handler, 3158 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, 3159 }; 3160 3161 static const struct of_device_id xilinx_dma_of_ids[] = { 3162 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 3163 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 3164 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 3165 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, 3166 {} 3167 }; 3168 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 3169 3170 /** 3171 * xilinx_dma_probe - Driver probe function 3172 * @pdev: Pointer to the platform_device structure 3173 * 3174 * Return: '0' on success and failure value on error 3175 */ 3176 static int xilinx_dma_probe(struct platform_device *pdev) 3177 { 3178 int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 3179 struct clk **, struct clk **, struct clk **) 3180 = axivdma_clk_init; 3181 struct device_node *node = pdev->dev.of_node; 3182 struct xilinx_dma_device *xdev; 3183 struct device_node *child, *np = pdev->dev.of_node; 3184 u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width; 3185 int i, err; 3186 3187 /* Allocate and initialize the DMA engine structure */ 3188 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 3189 if (!xdev) 3190 return -ENOMEM; 3191 3192 xdev->dev = &pdev->dev; 3193 if (np) { 3194 const struct of_device_id *match; 3195 3196 match = of_match_node(xilinx_dma_of_ids, np); 3197 if (match && match->data) { 3198 
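		/*
		 * Editor's note: the matched of_device_id carries a
		 * per-variant struct xilinx_dma_config, which supplies the
		 * dmatype, clk_init helper, IRQ handler and maximum channel
		 * count used throughout the rest of probe.
		 */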
xdev->dma_config = match->data; 3199 clk_init = xdev->dma_config->clk_init; 3200 } 3201 } 3202 3203 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 3204 &xdev->rx_clk, &xdev->rxs_clk); 3205 if (err) 3206 return err; 3207 3208 /* Request and map I/O memory */ 3209 xdev->regs = devm_platform_ioremap_resource(pdev, 0); 3210 if (IS_ERR(xdev->regs)) { 3211 err = PTR_ERR(xdev->regs); 3212 goto disable_clks; 3213 } 3214 /* Retrieve the DMA engine properties from the device tree */ 3215 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); 3216 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; 3217 3218 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || 3219 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 3220 if (!of_property_read_u32(node, "xlnx,sg-length-width", 3221 &len_width)) { 3222 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || 3223 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { 3224 dev_warn(xdev->dev, 3225 "invalid xlnx,sg-length-width property value. Using default width\n"); 3226 } else { 3227 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) 3228 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); 3229 xdev->max_buffer_len = 3230 GENMASK(len_width - 1, 0); 3231 } 3232 } 3233 } 3234 3235 dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len); 3236 3237 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3238 xdev->has_axistream_connected = 3239 of_property_read_bool(node, "xlnx,axistream-connected"); 3240 } 3241 3242 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 3243 err = of_property_read_u32(node, "xlnx,num-fstores", 3244 &num_frames); 3245 if (err < 0) { 3246 dev_err(xdev->dev, 3247 "missing xlnx,num-fstores property\n"); 3248 goto disable_clks; 3249 } 3250 3251 err = of_property_read_u32(node, "xlnx,flush-fsync", 3252 &xdev->flush_on_fsync); 3253 if (err < 0) 3254 dev_warn(xdev->dev, 3255 "missing xlnx,flush-fsync property\n"); 3256 } 3257 3258 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); 3259 if (err < 0) 3260 dev_warn(xdev->dev, 3261 "missing xlnx,addrwidth property, using default value %d\n", 3262 XILINX_DMA_DFAULT_ADDRWIDTH); 3263 3264 if (addr_width > 32) 3265 xdev->ext_addr = true; 3266 else 3267 xdev->ext_addr = false; 3268 3269 /* Set metadata mode */ 3270 if (xdev->has_axistream_connected) 3271 xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE; 3272 3273 /* Set the dma mask bits */ 3274 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); 3275 if (err < 0) { 3276 dev_err(xdev->dev, "DMA mask error %d\n", err); 3277 goto disable_clks; 3278 } 3279 3280 /* Initialize the DMA engine */ 3281 xdev->common.dev = &pdev->dev; 3282 3283 INIT_LIST_HEAD(&xdev->common.channels); 3284 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { 3285 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 3286 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 3287 } 3288 3289 xdev->common.device_alloc_chan_resources = 3290 xilinx_dma_alloc_chan_resources; 3291 xdev->common.device_free_chan_resources = 3292 xilinx_dma_free_chan_resources; 3293 xdev->common.device_terminate_all = xilinx_dma_terminate_all; 3294 xdev->common.device_synchronize = xilinx_dma_synchronize; 3295 xdev->common.device_tx_status = xilinx_dma_tx_status; 3296 xdev->common.device_issue_pending = xilinx_dma_issue_pending; 3297 xdev->common.device_config = xilinx_dma_device_config; 3298 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3299 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 3300 
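		/*
		 * Editor's note: each variant registers only the prep
		 * callbacks its hardware supports: AXI DMA adds slave_sg,
		 * cyclic and peripheral_dma_vec below, CDMA adds memcpy,
		 * MCDMA adds slave_sg, and VDMA adds interleaved transfers.
		 */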
xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec; 3301 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 3302 xdev->common.device_prep_dma_cyclic = 3303 xilinx_dma_prep_dma_cyclic; 3304 /* Residue calculation is supported by only AXI DMA and CDMA */ 3305 xdev->common.residue_granularity = 3306 DMA_RESIDUE_GRANULARITY_SEGMENT; 3307 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 3308 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 3309 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 3310 /* Residue calculation is supported by only AXI DMA and CDMA */ 3311 xdev->common.residue_granularity = 3312 DMA_RESIDUE_GRANULARITY_SEGMENT; 3313 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 3314 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; 3315 } else { 3316 xdev->common.device_prep_interleaved_dma = 3317 xilinx_vdma_dma_prep_interleaved; 3318 } 3319 3320 platform_set_drvdata(pdev, xdev); 3321 3322 /* Initialize the channels */ 3323 for_each_child_of_node(node, child) { 3324 err = xilinx_dma_child_probe(xdev, child); 3325 if (err < 0) { 3326 of_node_put(child); 3327 goto error; 3328 } 3329 } 3330 3331 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 3332 for (i = 0; i < xdev->dma_config->max_channels; i++) 3333 if (xdev->chan[i]) 3334 xdev->chan[i]->num_frms = num_frames; 3335 } 3336 3337 /* Register the DMA engine with the core */ 3338 err = dma_async_device_register(&xdev->common); 3339 if (err) { 3340 dev_err(xdev->dev, "failed to register the dma device\n"); 3341 goto error; 3342 } 3343 3344 err = of_dma_controller_register(node, of_dma_xilinx_xlate, 3345 xdev); 3346 if (err < 0) { 3347 dev_err(&pdev->dev, "Unable to register DMA to DT\n"); 3348 dma_async_device_unregister(&xdev->common); 3349 goto error; 3350 } 3351 3352 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 3353 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); 3354 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) 3355 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); 3356 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) 3357 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); 3358 else 3359 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 3360 3361 return 0; 3362 3363 error: 3364 for (i = 0; i < xdev->dma_config->max_channels; i++) 3365 if (xdev->chan[i]) 3366 xilinx_dma_chan_remove(xdev->chan[i]); 3367 disable_clks: 3368 xdma_disable_allclks(xdev); 3369 3370 return err; 3371 } 3372 3373 /** 3374 * xilinx_dma_remove - Driver remove function 3375 * @pdev: Pointer to the platform_device structure 3376 */ 3377 static void xilinx_dma_remove(struct platform_device *pdev) 3378 { 3379 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); 3380 int i; 3381 3382 of_dma_controller_free(pdev->dev.of_node); 3383 3384 dma_async_device_unregister(&xdev->common); 3385 3386 for (i = 0; i < xdev->dma_config->max_channels; i++) 3387 if (xdev->chan[i]) 3388 xilinx_dma_chan_remove(xdev->chan[i]); 3389 3390 xdma_disable_allclks(xdev); 3391 } 3392 3393 static struct platform_driver xilinx_vdma_driver = { 3394 .driver = { 3395 .name = "xilinx-vdma", 3396 .of_match_table = xilinx_dma_of_ids, 3397 }, 3398 .probe = xilinx_dma_probe, 3399 .remove = xilinx_dma_remove, 3400 }; 3401 3402 module_platform_driver(xilinx_vdma_driver); 3403 3404 MODULE_AUTHOR("Xilinx, Inc."); 3405 MODULE_DESCRIPTION("Xilinx VDMA driver"); 3406 MODULE_LICENSE("GPL v2"); 3407
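/*
 * Editor's addition: a minimal usage sketch, not part of the driver.  It
 * shows how a hypothetical video capture client might request an S2MM
 * channel and apply the VDMA-specific runtime configuration exported by
 * xilinx_vdma_channel_set_config().  The device pointer, channel name
 * and numeric values are placeholders.
 */
static int __maybe_unused xilinx_vdma_example_setup(struct device *dev)
{
	struct xilinx_vdma_config cfg = { };
	struct dma_chan *chan;
	int err;

	/* The name must match a "dma-names" entry of the client's DT node. */
	chan = dma_request_chan(dev, "vdma-s2mm");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	cfg.frm_cnt_en = 1;	/* interrupt after 'coalesc' completed frames */
	cfg.coalesc = 2;
	cfg.park = 0;		/* circulate through all frame stores */

	err = xilinx_vdma_channel_set_config(chan, &cfg);
	if (err) {
		dma_release_channel(chan);
		return err;
	}

	/* The client would now prepare interleaved transfers on 'chan'. */
	return 0;
}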