1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * DMA driver for Xilinx Video DMA Engine
4 *
5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 *
7 * Based on the Freescale DMA driver.
8 *
9 * Description:
10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
11 * core that provides high-bandwidth direct memory access between memory
12 * and AXI4-Stream type video target peripherals. The core provides efficient
13 * two dimensional DMA operations with independent asynchronous read (S2MM)
14 * and write (MM2S) channel operation. It can be configured to have either
15 * one channel or two channels. If configured as two channels, one is to
16 * transmit to the video device (MM2S) and another is to receive from the
17 * video device (S2MM). Initialization, status, interrupt and management
18 * registers are accessed through an AXI4-Lite slave interface.
19 *
20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
21 * provides high-bandwidth one dimensional direct memory access between memory
22 * and AXI4-Stream target peripherals. It supports one receive and one
23 * transmit channel, both of them optional at synthesis time.
24 *
25 * The AXI CDMA is a soft Xilinx IP core that provides high-bandwidth Direct
26 * Memory Access (DMA) between a memory-mapped source address and a
27 * memory-mapped destination address.
28 *
29 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
30 * Xilinx IP that provides high-bandwidth direct memory access between
31 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
32 * (SG) interface with independent configuration support for multiple channels.
33 *
34 */
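
/*
 * Client usage sketch (not part of this driver): consumers reach these
 * engines through the generic dmaengine client API. The snippet below is a
 * minimal, hedged example of a slave (MM2S) transfer; the channel name
 * "tx_chan", the buffer and the completion callback are illustrative only.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx_chan");
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = done_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * dma_async_issue_pending() ends up in xilinx_dma_issue_pending() below.
 * VDMA clients additionally pass frame geometry through
 * xilinx_vdma_channel_set_config() (see include/linux/dma/xilinx_dma.h).
 */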
35
36 #include <linux/bitops.h>
37 #include <linux/dmapool.h>
38 #include <linux/dma/xilinx_dma.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/module.h>
44 #include <linux/of.h>
45 #include <linux/of_dma.h>
46 #include <linux/of_irq.h>
47 #include <linux/platform_device.h>
48 #include <linux/slab.h>
49 #include <linux/string_choices.h>
50 #include <linux/clk.h>
51 #include <linux/io-64-nonatomic-lo-hi.h>
52
53 #include "../dmaengine.h"
54
55 /* Register/Descriptor Offsets */
56 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
57 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
58 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
59 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
60
61 /* Control Registers */
62 #define XILINX_DMA_REG_DMACR 0x0000
63 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
64 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
65 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
66 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
67 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
68 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
69 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
70 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
71 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
72 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
73 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
74 #define XILINX_DMA_DMACR_RESET BIT(2)
75 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
76 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
77 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
78 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
79 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
80 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
81
82 #define XILINX_DMA_REG_DMASR 0x0004
83 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
84 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
85 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
86 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
87 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
88 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
89 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
90 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
91 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
92 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
93 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
94 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
95 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
96 #define XILINX_DMA_DMASR_IDLE BIT(1)
97 #define XILINX_DMA_DMASR_HALTED BIT(0)
98 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
99 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
100
101 #define XILINX_DMA_REG_CURDESC 0x0008
102 #define XILINX_DMA_REG_TAILDESC 0x0010
103 #define XILINX_DMA_REG_REG_INDEX 0x0014
104 #define XILINX_DMA_REG_FRMSTORE 0x0018
105 #define XILINX_DMA_REG_THRESHOLD 0x001c
106 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
107 #define XILINX_DMA_REG_PARK_PTR 0x0028
108 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
109 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
110 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
111 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
112 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
113
114 /* Register Direct Mode Registers */
115 #define XILINX_DMA_REG_VSIZE 0x0000
116 #define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
117 #define XILINX_DMA_REG_HSIZE 0x0004
118 #define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
119
120 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
121 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
122 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
123
124 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
125 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
126
127 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
128 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
129
130 /* HW specific definitions */
131 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
132 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
133 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
134 #define XILINX_DMA_DFAULT_ADDRWIDTH 0x20
135
136 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
137 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
138 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
139 XILINX_DMA_DMASR_ERR_IRQ)
140
141 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
142 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
143 XILINX_DMA_DMASR_SOF_LATE_ERR | \
144 XILINX_DMA_DMASR_SG_DEC_ERR | \
145 XILINX_DMA_DMASR_SG_SLV_ERR | \
146 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
147 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
148 XILINX_DMA_DMASR_DMA_DEC_ERR | \
149 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
150 XILINX_DMA_DMASR_DMA_INT_ERR)
151
152 /*
153 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
154 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
155 * is enabled in the h/w system.
156 */
157 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
158 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
159 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
160 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
161 XILINX_DMA_DMASR_DMA_INT_ERR)
162
163 /* Axi VDMA Flush on Fsync bits */
164 #define XILINX_DMA_FLUSH_S2MM 3
165 #define XILINX_DMA_FLUSH_MM2S 2
166 #define XILINX_DMA_FLUSH_BOTH 1
167
168 /* Delay loop counter to prevent hardware failure */
169 #define XILINX_DMA_LOOP_COUNT 1000000
170
171 /* AXI DMA Specific Registers/Offsets */
172 #define XILINX_DMA_REG_SRCDSTADDR 0x18
173 #define XILINX_DMA_REG_BTT 0x28
174
175 /* AXI DMA Specific Masks/Bit fields */
176 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
177 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
178 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
179 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
180 #define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
181 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
182 #define XILINX_DMA_CR_COALESCE_SHIFT 16
183 #define XILINX_DMA_CR_DELAY_SHIFT 24
184 #define XILINX_DMA_BD_SOP BIT(27)
185 #define XILINX_DMA_BD_EOP BIT(26)
186 #define XILINX_DMA_BD_COMP_MASK BIT(31)
187 #define XILINX_DMA_COALESCE_MAX 255
188 #define XILINX_DMA_NUM_DESCS 512
189 #define XILINX_DMA_NUM_APP_WORDS 5
190
191 /* AXI CDMA Specific Registers/Offsets */
192 #define XILINX_CDMA_REG_SRCADDR 0x18
193 #define XILINX_CDMA_REG_DSTADDR 0x20
194
195 /* AXI CDMA Specific Masks */
196 #define XILINX_CDMA_CR_SGMODE BIT(3)
197
198 #define xilinx_prep_dma_addr_t(addr) \
199 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
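/*
 * The token pasting above relies on the 64-bit address being split across two
 * adjacent descriptor fields named <addr> and <addr>_msb. For example,
 * xilinx_prep_dma_addr_t(hw->src_addr) expands to
 * ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))).
 */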
200
201 /* AXI MCDMA Specific Registers/Offsets */
202 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
203 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
204 #define XILINX_MCDMA_CHEN_OFFSET 0x0008
205 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
206 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
207 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
208 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
209 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
210 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
211 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
212
213 /* AXI MCDMA Specific Masks/Shifts */
214 #define XILINX_MCDMA_COALESCE_SHIFT 16
215 #define XILINX_MCDMA_COALESCE_MAX 24
216 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
217 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
218 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
219 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
220 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
221 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
222 #define XILINX_MCDMA_BD_EOP BIT(30)
223 #define XILINX_MCDMA_BD_SOP BIT(31)
224
225 /**
226 * struct xilinx_vdma_desc_hw - Hardware Descriptor
227 * @next_desc: Next Descriptor Pointer @0x00
228 * @pad1: Reserved @0x04
229 * @buf_addr: Buffer address @0x08
230 * @buf_addr_msb: MSB of Buffer address @0x0C
231 * @vsize: Vertical Size @0x10
232 * @hsize: Horizontal Size @0x14
233 * @stride: Number of bytes between the first
234 * pixels of each horizontal line @0x18
235 */
236 struct xilinx_vdma_desc_hw {
237 u32 next_desc;
238 u32 pad1;
239 u32 buf_addr;
240 u32 buf_addr_msb;
241 u32 vsize;
242 u32 hsize;
243 u32 stride;
244 } __aligned(64);
245
246 /**
247 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
248 * @next_desc: Next Descriptor Pointer @0x00
249 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
250 * @buf_addr: Buffer address @0x08
251 * @buf_addr_msb: MSB of Buffer address @0x0C
252 * @reserved1: Reserved @0x10
253 * @reserved2: Reserved @0x14
254 * @control: Control field @0x18
255 * @status: Status field @0x1C
256 * @app: APP Fields @0x20 - 0x30
257 */
258 struct xilinx_axidma_desc_hw {
259 u32 next_desc;
260 u32 next_desc_msb;
261 u32 buf_addr;
262 u32 buf_addr_msb;
263 u32 reserved1;
264 u32 reserved2;
265 u32 control;
266 u32 status;
267 u32 app[XILINX_DMA_NUM_APP_WORDS];
268 } __aligned(64);
269
270 /**
271 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
272 * @next_desc: Next Descriptor Pointer @0x00
273 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
274 * @buf_addr: Buffer address @0x08
275 * @buf_addr_msb: MSB of Buffer address @0x0C
276 * @rsvd: Reserved field @0x10
277 * @control: Control Information field @0x14
278 * @status: Status field @0x18
279 * @sideband_status: Status of sideband signals @0x1C
280 * @app: APP Fields @0x20 - 0x30
281 */
282 struct xilinx_aximcdma_desc_hw {
283 u32 next_desc;
284 u32 next_desc_msb;
285 u32 buf_addr;
286 u32 buf_addr_msb;
287 u32 rsvd;
288 u32 control;
289 u32 status;
290 u32 sideband_status;
291 u32 app[XILINX_DMA_NUM_APP_WORDS];
292 } __aligned(64);
293
294 /**
295 * struct xilinx_cdma_desc_hw - Hardware Descriptor
296 * @next_desc: Next Descriptor Pointer @0x00
297 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
298 * @src_addr: Source address @0x08
299 * @src_addr_msb: Source address MSB @0x0C
300 * @dest_addr: Destination address @0x10
301 * @dest_addr_msb: Destination address MSB @0x14
302 * @control: Control field @0x18
303 * @status: Status field @0x1C
304 */
305 struct xilinx_cdma_desc_hw {
306 u32 next_desc;
307 u32 next_desc_msb;
308 u32 src_addr;
309 u32 src_addr_msb;
310 u32 dest_addr;
311 u32 dest_addr_msb;
312 u32 control;
313 u32 status;
314 } __aligned(64);
315
316 /**
317 * struct xilinx_vdma_tx_segment - Descriptor segment
318 * @hw: Hardware descriptor
319 * @node: Node in the descriptor segments list
320 * @phys: Physical address of segment
321 */
322 struct xilinx_vdma_tx_segment {
323 struct xilinx_vdma_desc_hw hw;
324 struct list_head node;
325 dma_addr_t phys;
326 } __aligned(64);
327
328 /**
329 * struct xilinx_axidma_tx_segment - Descriptor segment
330 * @hw: Hardware descriptor
331 * @node: Node in the descriptor segments list
332 * @phys: Physical address of segment
333 */
334 struct xilinx_axidma_tx_segment {
335 struct xilinx_axidma_desc_hw hw;
336 struct list_head node;
337 dma_addr_t phys;
338 } __aligned(64);
339
340 /**
341 * struct xilinx_aximcdma_tx_segment - Descriptor segment
342 * @hw: Hardware descriptor
343 * @node: Node in the descriptor segments list
344 * @phys: Physical address of segment
345 */
346 struct xilinx_aximcdma_tx_segment {
347 struct xilinx_aximcdma_desc_hw hw;
348 struct list_head node;
349 dma_addr_t phys;
350 } __aligned(64);
351
352 /**
353 * struct xilinx_cdma_tx_segment - Descriptor segment
354 * @hw: Hardware descriptor
355 * @node: Node in the descriptor segments list
356 * @phys: Physical address of segment
357 */
358 struct xilinx_cdma_tx_segment {
359 struct xilinx_cdma_desc_hw hw;
360 struct list_head node;
361 dma_addr_t phys;
362 } __aligned(64);
363
364 /**
365 * struct xilinx_dma_tx_descriptor - Per Transaction structure
366 * @async_tx: Async transaction descriptor
367 * @segments: TX segments list
368 * @node: Node in the channel descriptors list
369 * @cyclic: Check for cyclic transfers.
370 * @err: Whether the descriptor has an error.
371 * @residue: Residue of the completed descriptor
372 */
373 struct xilinx_dma_tx_descriptor {
374 struct dma_async_tx_descriptor async_tx;
375 struct list_head segments;
376 struct list_head node;
377 bool cyclic;
378 bool err;
379 u32 residue;
380 };
381
382 /**
383 * struct xilinx_dma_chan - Driver specific DMA channel structure
384 * @xdev: Driver specific device structure
385 * @ctrl_offset: Control registers offset
386 * @desc_offset: TX descriptor registers offset
387 * @lock: Descriptor operation lock
388 * @pending_list: Descriptors waiting
389 * @active_list: Descriptors ready to submit
390 * @done_list: Complete descriptors
391 * @free_seg_list: Free descriptors
392 * @common: DMA common channel
393 * @desc_pool: Descriptors pool
394 * @dev: The dma device
395 * @irq: Channel IRQ
396 * @id: Channel ID
397 * @direction: Transfer direction
398 * @num_frms: Number of frames
399 * @has_sg: Support scatter transfers
400 * @cyclic: Check for cyclic transfers.
401 * @genlock: Support genlock mode
402 * @err: Channel has errors
403 * @idle: Check for channel idle
404 * @terminating: Check for channel being synchronized by user
405 * @tasklet: Cleanup work after irq
406 * @config: Device configuration info
407 * @flush_on_fsync: Flush on Frame sync
408 * @desc_pendingcount: Descriptor pending count
409 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
410 * @desc_submitcount: Descriptor h/w submitted count
411 * @seg_v: Statically allocated segments base
412 * @seg_mv: Statically allocated segments base for MCDMA
413 * @seg_p: Physical allocated segments base
414 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
415 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
416 * @start_transfer: Differentiate b/w DMA IP's transfer
417 * @stop_transfer: Differentiate b/w DMA IP's quiesce
418 * @tdest: TDEST value for mcdma
419 * @has_vflip: S2MM vertical flip
420 * @irq_delay: Interrupt delay timeout
421 */
422 struct xilinx_dma_chan {
423 struct xilinx_dma_device *xdev;
424 u32 ctrl_offset;
425 u32 desc_offset;
426 spinlock_t lock;
427 struct list_head pending_list;
428 struct list_head active_list;
429 struct list_head done_list;
430 struct list_head free_seg_list;
431 struct dma_chan common;
432 struct dma_pool *desc_pool;
433 struct device *dev;
434 int irq;
435 int id;
436 enum dma_transfer_direction direction;
437 int num_frms;
438 bool has_sg;
439 bool cyclic;
440 bool genlock;
441 bool err;
442 bool idle;
443 bool terminating;
444 struct tasklet_struct tasklet;
445 struct xilinx_vdma_config config;
446 bool flush_on_fsync;
447 u32 desc_pendingcount;
448 bool ext_addr;
449 u32 desc_submitcount;
450 struct xilinx_axidma_tx_segment *seg_v;
451 struct xilinx_aximcdma_tx_segment *seg_mv;
452 dma_addr_t seg_p;
453 struct xilinx_axidma_tx_segment *cyclic_seg_v;
454 dma_addr_t cyclic_seg_p;
455 void (*start_transfer)(struct xilinx_dma_chan *chan);
456 int (*stop_transfer)(struct xilinx_dma_chan *chan);
457 u16 tdest;
458 bool has_vflip;
459 u8 irq_delay;
460 };
461
462 /**
463 * enum xdma_ip_type - DMA IP type.
464 *
465 * @XDMA_TYPE_AXIDMA: Axi dma ip.
466 * @XDMA_TYPE_CDMA: Axi cdma ip.
467 * @XDMA_TYPE_VDMA: Axi vdma ip.
468 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
469 *
470 */
471 enum xdma_ip_type {
472 XDMA_TYPE_AXIDMA = 0,
473 XDMA_TYPE_CDMA,
474 XDMA_TYPE_VDMA,
475 XDMA_TYPE_AXIMCDMA
476 };
477
478 struct xilinx_dma_config {
479 enum xdma_ip_type dmatype;
480 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
481 struct clk **tx_clk, struct clk **txs_clk,
482 struct clk **rx_clk, struct clk **rxs_clk);
483 irqreturn_t (*irq_handler)(int irq, void *data);
484 const int max_channels;
485 };
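
/*
 * Each supported IP fills one of these in. As a sketch, the AXI DMA variant
 * defined further down in this file (outside this excerpt) looks roughly like:
 *
 *	static const struct xilinx_dma_config axidma_config = {
 *		.dmatype = XDMA_TYPE_AXIDMA,
 *		.clk_init = axidma_clk_init,
 *		.irq_handler = xilinx_dma_irq_handler,
 *		.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
 *	};
 */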
486
487 /**
488 * struct xilinx_dma_device - DMA device structure
489 * @regs: I/O mapped base address
490 * @dev: Device Structure
491 * @common: DMA device structure
492 * @chan: Driver specific DMA channel
493 * @flush_on_fsync: Flush on frame sync
494 * @ext_addr: Indicates 64 bit addressing is supported by dma device
495 * @pdev: Platform device structure pointer
496 * @dma_config: DMA config structure
497 * @axi_clk: DMA AXI4-Lite interface clock
498 * @tx_clk: DMA mm2s clock
499 * @txs_clk: DMA mm2s stream clock
500 * @rx_clk: DMA s2mm clock
501 * @rxs_clk: DMA s2mm stream clock
502 * @s2mm_chan_id: DMA s2mm channel identifier
503 * @mm2s_chan_id: DMA mm2s channel identifier
504 * @max_buffer_len: Max buffer length
505 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
506 */
507 struct xilinx_dma_device {
508 void __iomem *regs;
509 struct device *dev;
510 struct dma_device common;
511 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
512 u32 flush_on_fsync;
513 bool ext_addr;
514 struct platform_device *pdev;
515 const struct xilinx_dma_config *dma_config;
516 struct clk *axi_clk;
517 struct clk *tx_clk;
518 struct clk *txs_clk;
519 struct clk *rx_clk;
520 struct clk *rxs_clk;
521 u32 s2mm_chan_id;
522 u32 mm2s_chan_id;
523 u32 max_buffer_len;
524 bool has_axistream_connected;
525 };
526
527 /* Macros */
528 #define to_xilinx_chan(chan) \
529 container_of(chan, struct xilinx_dma_chan, common)
530 #define to_dma_tx_descriptor(tx) \
531 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
532 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
533 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
534 val, cond, delay_us, timeout_us)
535
536 /* IO accessors */
537 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
538 {
539 return ioread32(chan->xdev->regs + reg);
540 }
541
542 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
543 {
544 iowrite32(value, chan->xdev->regs + reg);
545 }
546
547 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
548 u32 value)
549 {
550 dma_write(chan, chan->desc_offset + reg, value);
551 }
552
553 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
554 {
555 return dma_read(chan, chan->ctrl_offset + reg);
556 }
557
558 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
559 u32 value)
560 {
561 dma_write(chan, chan->ctrl_offset + reg, value);
562 }
563
564 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
565 u32 clr)
566 {
567 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
568 }
569
570 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
571 u32 set)
572 {
573 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
574 }
575
576 /**
577 * vdma_desc_write_64 - 64-bit descriptor write
578 * @chan: Driver specific VDMA channel
579 * @reg: Register to write
580 * @value_lsb: lower address of the descriptor.
581 * @value_msb: upper address of the descriptor.
582 *
583 * Since the VDMA driver may write to a register offset that is not a
584 * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
585 * 32-bit writes instead of a single 64-bit register write.
586 */
587 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
588 u32 value_lsb, u32 value_msb)
589 {
590 /* Write the lsb 32 bits*/
591 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
592
593 /* Write the msb 32 bits */
594 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
595 }
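
/*
 * For instance, xilinx_vdma_start_transfer() below uses this helper to
 * program one frame-buffer start address per segment:
 *
 *	vdma_desc_write_64(chan, XILINX_VDMA_REG_START_ADDRESS_64(i),
 *			   segment->hw.buf_addr, segment->hw.buf_addr_msb);
 */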
596
597 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
598 {
599 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
600 }
601
602 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
603 dma_addr_t addr)
604 {
605 if (chan->ext_addr)
606 dma_writeq(chan, reg, addr);
607 else
608 dma_ctrl_write(chan, reg, addr);
609 }
610
611 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
612 struct xilinx_axidma_desc_hw *hw,
613 dma_addr_t buf_addr, size_t sg_used,
614 size_t period_len)
615 {
616 if (chan->ext_addr) {
617 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
618 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
619 period_len);
620 } else {
621 hw->buf_addr = buf_addr + sg_used + period_len;
622 }
623 }
624
625 static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
626 struct xilinx_aximcdma_desc_hw *hw,
627 dma_addr_t buf_addr, size_t sg_used)
628 {
629 if (chan->ext_addr) {
630 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
631 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
632 } else {
633 hw->buf_addr = buf_addr + sg_used;
634 }
635 }
636
637 /**
638 * xilinx_dma_get_metadata_ptr- Populate metadata pointer and payload length
639 * @tx: async transaction descriptor
640 * @payload_len: metadata payload length
641 * @max_len: metadata max length
642 * Return: The app field pointer.
643 */
644 static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
645 size_t *payload_len, size_t *max_len)
646 {
647 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
648 struct xilinx_axidma_tx_segment *seg;
649
650 *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
651 seg = list_first_entry(&desc->segments,
652 struct xilinx_axidma_tx_segment, node);
653 return seg->hw.app;
654 }
655
656 static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
657 .get_ptr = xilinx_dma_get_metadata_ptr,
658 };
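
/*
 * Clients read or fill these APP words through the generic metadata helpers
 * in dmaengine.h. A hedged client-side sketch (variable names illustrative):
 *
 *	size_t payload_len, max_len;
 *	u32 *app = dmaengine_desc_get_metadata_ptr(tx, &payload_len, &max_len);
 *
 *	if (!IS_ERR(app)) {
 *		app[0] = stream_id;
 *		dmaengine_desc_set_metadata_len(tx, 4 * sizeof(u32));
 *	}
 */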
659
660 /* -----------------------------------------------------------------------------
661 * Descriptors and segments alloc and free
662 */
663
664 /**
665 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
666 * @chan: Driver specific DMA channel
667 *
668 * Return: The allocated segment on success and NULL on failure.
669 */
670 static struct xilinx_vdma_tx_segment *
671 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
672 {
673 struct xilinx_vdma_tx_segment *segment;
674 dma_addr_t phys;
675
676 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
677 if (!segment)
678 return NULL;
679
680 segment->phys = phys;
681
682 return segment;
683 }
684
685 /**
686 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
687 * @chan: Driver specific DMA channel
688 *
689 * Return: The allocated segment on success and NULL on failure.
690 */
691 static struct xilinx_cdma_tx_segment *
692 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
693 {
694 struct xilinx_cdma_tx_segment *segment;
695 dma_addr_t phys;
696
697 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
698 if (!segment)
699 return NULL;
700
701 segment->phys = phys;
702
703 return segment;
704 }
705
706 /**
707 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
708 * @chan: Driver specific DMA channel
709 *
710 * Return: The allocated segment on success and NULL on failure.
711 */
712 static struct xilinx_axidma_tx_segment *
713 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
714 {
715 struct xilinx_axidma_tx_segment *segment = NULL;
716 unsigned long flags;
717
718 spin_lock_irqsave(&chan->lock, flags);
719 if (!list_empty(&chan->free_seg_list)) {
720 segment = list_first_entry(&chan->free_seg_list,
721 struct xilinx_axidma_tx_segment,
722 node);
723 list_del(&segment->node);
724 }
725 spin_unlock_irqrestore(&chan->lock, flags);
726
727 if (!segment)
728 dev_dbg(chan->dev, "Could not find free tx segment\n");
729
730 return segment;
731 }
732
733 /**
734 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
735 * @chan: Driver specific DMA channel
736 *
737 * Return: The allocated segment on success and NULL on failure.
738 */
739 static struct xilinx_aximcdma_tx_segment *
740 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
741 {
742 struct xilinx_aximcdma_tx_segment *segment = NULL;
743 unsigned long flags;
744
745 spin_lock_irqsave(&chan->lock, flags);
746 if (!list_empty(&chan->free_seg_list)) {
747 segment = list_first_entry(&chan->free_seg_list,
748 struct xilinx_aximcdma_tx_segment,
749 node);
750 list_del(&segment->node);
751 }
752 spin_unlock_irqrestore(&chan->lock, flags);
753
754 return segment;
755 }
756
757 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
758 {
759 u32 next_desc = hw->next_desc;
760 u32 next_desc_msb = hw->next_desc_msb;
761
762 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
763
764 hw->next_desc = next_desc;
765 hw->next_desc_msb = next_desc_msb;
766 }
767
768 static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
769 {
770 u32 next_desc = hw->next_desc;
771 u32 next_desc_msb = hw->next_desc_msb;
772
773 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
774
775 hw->next_desc = next_desc;
776 hw->next_desc_msb = next_desc_msb;
777 }
778
779 /**
780 * xilinx_dma_free_tx_segment - Free transaction segment
781 * @chan: Driver specific DMA channel
782 * @segment: DMA transaction segment
783 */
784 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
785 struct xilinx_axidma_tx_segment *segment)
786 {
787 xilinx_dma_clean_hw_desc(&segment->hw);
788
789 list_add_tail(&segment->node, &chan->free_seg_list);
790 }
791
792 /**
793 * xilinx_mcdma_free_tx_segment - Free transaction segment
794 * @chan: Driver specific DMA channel
795 * @segment: DMA transaction segment
796 */
797 static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
798 struct xilinx_aximcdma_tx_segment *
799 segment)
800 {
801 xilinx_mcdma_clean_hw_desc(&segment->hw);
802
803 list_add_tail(&segment->node, &chan->free_seg_list);
804 }
805
806 /**
807 * xilinx_cdma_free_tx_segment - Free transaction segment
808 * @chan: Driver specific DMA channel
809 * @segment: DMA transaction segment
810 */
811 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
812 struct xilinx_cdma_tx_segment *segment)
813 {
814 dma_pool_free(chan->desc_pool, segment, segment->phys);
815 }
816
817 /**
818 * xilinx_vdma_free_tx_segment - Free transaction segment
819 * @chan: Driver specific DMA channel
820 * @segment: DMA transaction segment
821 */
822 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
823 struct xilinx_vdma_tx_segment *segment)
824 {
825 dma_pool_free(chan->desc_pool, segment, segment->phys);
826 }
827
828 /**
829 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
830 * @chan: Driver specific DMA channel
831 *
832 * Return: The allocated descriptor on success and NULL on failure.
833 */
834 static struct xilinx_dma_tx_descriptor *
835 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
836 {
837 struct xilinx_dma_tx_descriptor *desc;
838
839 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
840 if (!desc)
841 return NULL;
842
843 INIT_LIST_HEAD(&desc->segments);
844
845 return desc;
846 }
847
848 /**
849 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
850 * @chan: Driver specific DMA channel
851 * @desc: DMA transaction descriptor
852 */
853 static void
854 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
855 struct xilinx_dma_tx_descriptor *desc)
856 {
857 struct xilinx_vdma_tx_segment *segment, *next;
858 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
859 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
860 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
861
862 if (!desc)
863 return;
864
865 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
866 list_for_each_entry_safe(segment, next, &desc->segments, node) {
867 list_del(&segment->node);
868 xilinx_vdma_free_tx_segment(chan, segment);
869 }
870 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
871 list_for_each_entry_safe(cdma_segment, cdma_next,
872 &desc->segments, node) {
873 list_del(&cdma_segment->node);
874 xilinx_cdma_free_tx_segment(chan, cdma_segment);
875 }
876 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
877 list_for_each_entry_safe(axidma_segment, axidma_next,
878 &desc->segments, node) {
879 list_del(&axidma_segment->node);
880 xilinx_dma_free_tx_segment(chan, axidma_segment);
881 }
882 } else {
883 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
884 &desc->segments, node) {
885 list_del(&aximcdma_segment->node);
886 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
887 }
888 }
889
890 kfree(desc);
891 }
892
893 /* Required functions */
894
895 /**
896 * xilinx_dma_free_desc_list - Free descriptors list
897 * @chan: Driver specific DMA channel
898 * @list: List to parse and delete the descriptor
899 */
900 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
901 struct list_head *list)
902 {
903 struct xilinx_dma_tx_descriptor *desc, *next;
904
905 list_for_each_entry_safe(desc, next, list, node) {
906 list_del(&desc->node);
907 xilinx_dma_free_tx_descriptor(chan, desc);
908 }
909 }
910
911 /**
912 * xilinx_dma_free_descriptors - Free channel descriptors
913 * @chan: Driver specific DMA channel
914 */
915 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
916 {
917 unsigned long flags;
918
919 spin_lock_irqsave(&chan->lock, flags);
920
921 xilinx_dma_free_desc_list(chan, &chan->pending_list);
922 xilinx_dma_free_desc_list(chan, &chan->done_list);
923 xilinx_dma_free_desc_list(chan, &chan->active_list);
924
925 spin_unlock_irqrestore(&chan->lock, flags);
926 }
927
928 /**
929 * xilinx_dma_free_chan_resources - Free channel resources
930 * @dchan: DMA channel
931 */
932 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
933 {
934 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
935 unsigned long flags;
936
937 dev_dbg(chan->dev, "Free all channel resources.\n");
938
939 xilinx_dma_free_descriptors(chan);
940
941 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
942 spin_lock_irqsave(&chan->lock, flags);
943 INIT_LIST_HEAD(&chan->free_seg_list);
944 spin_unlock_irqrestore(&chan->lock, flags);
945
946 /* Free memory that is allocated for BD */
947 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
948 XILINX_DMA_NUM_DESCS, chan->seg_v,
949 chan->seg_p);
950
951 /* Free memory that is allocated for cyclic DMA mode */
952 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
953 chan->cyclic_seg_v, chan->cyclic_seg_p);
954 }
955
956 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
957 spin_lock_irqsave(&chan->lock, flags);
958 INIT_LIST_HEAD(&chan->free_seg_list);
959 spin_unlock_irqrestore(&chan->lock, flags);
960
961 /* Free memory that is allocated for BD */
962 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
963 XILINX_DMA_NUM_DESCS, chan->seg_mv,
964 chan->seg_p);
965 }
966
967 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
968 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
969 dma_pool_destroy(chan->desc_pool);
970 chan->desc_pool = NULL;
971 }
972
973 }
974
975 /**
976 * xilinx_dma_get_residue - Compute residue for a given descriptor
977 * @chan: Driver specific dma channel
978 * @desc: dma transaction descriptor
979 *
980 * Return: The number of residue bytes for the descriptor.
981 */
982 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
983 struct xilinx_dma_tx_descriptor *desc)
984 {
985 struct xilinx_cdma_tx_segment *cdma_seg;
986 struct xilinx_axidma_tx_segment *axidma_seg;
987 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
988 struct xilinx_cdma_desc_hw *cdma_hw;
989 struct xilinx_axidma_desc_hw *axidma_hw;
990 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
991 struct list_head *entry;
992 u32 residue = 0;
993
994 list_for_each(entry, &desc->segments) {
995 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
996 cdma_seg = list_entry(entry,
997 struct xilinx_cdma_tx_segment,
998 node);
999 cdma_hw = &cdma_seg->hw;
1000 residue += (cdma_hw->control - cdma_hw->status) &
1001 chan->xdev->max_buffer_len;
1002 } else if (chan->xdev->dma_config->dmatype ==
1003 XDMA_TYPE_AXIDMA) {
1004 axidma_seg = list_entry(entry,
1005 struct xilinx_axidma_tx_segment,
1006 node);
1007 axidma_hw = &axidma_seg->hw;
1008 residue += (axidma_hw->control - axidma_hw->status) &
1009 chan->xdev->max_buffer_len;
1010 } else {
1011 aximcdma_seg =
1012 list_entry(entry,
1013 struct xilinx_aximcdma_tx_segment,
1014 node);
1015 aximcdma_hw = &aximcdma_seg->hw;
1016 residue +=
1017 (aximcdma_hw->control - aximcdma_hw->status) &
1018 chan->xdev->max_buffer_len;
1019 }
1020 }
1021
1022 return residue;
1023 }
1024
1025 /**
1026 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
1027 * @chan: Driver specific dma channel
1028 * @desc: dma transaction descriptor
1029 * @flags: flags for spin lock
1030 */
1031 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
1032 struct xilinx_dma_tx_descriptor *desc,
1033 unsigned long *flags)
1034 {
1035 struct dmaengine_desc_callback cb;
1036
1037 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1038 if (dmaengine_desc_callback_valid(&cb)) {
1039 spin_unlock_irqrestore(&chan->lock, *flags);
1040 dmaengine_desc_callback_invoke(&cb, NULL);
1041 spin_lock_irqsave(&chan->lock, *flags);
1042 }
1043 }
1044
1045 /**
1046 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1047 * @chan: Driver specific DMA channel
1048 */
1049 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1050 {
1051 struct xilinx_dma_tx_descriptor *desc, *next;
1052 unsigned long flags;
1053
1054 spin_lock_irqsave(&chan->lock, flags);
1055
1056 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1057 struct dmaengine_result result;
1058
1059 if (desc->cyclic) {
1060 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1061 break;
1062 }
1063
1064 /* Remove from the list of running transactions */
1065 list_del(&desc->node);
1066
1067 if (unlikely(desc->err)) {
1068 if (chan->direction == DMA_DEV_TO_MEM)
1069 result.result = DMA_TRANS_READ_FAILED;
1070 else
1071 result.result = DMA_TRANS_WRITE_FAILED;
1072 } else {
1073 result.result = DMA_TRANS_NOERROR;
1074 }
1075
1076 result.residue = desc->residue;
1077
1078 /* Run the link descriptor callback function */
1079 spin_unlock_irqrestore(&chan->lock, flags);
1080 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1081 spin_lock_irqsave(&chan->lock, flags);
1082
1083 /* Run any dependencies, then free the descriptor */
1084 dma_run_dependencies(&desc->async_tx);
1085 xilinx_dma_free_tx_descriptor(chan, desc);
1086
1087 /*
1088 * While we ran a callback the user called a terminate function,
1089 * which takes care of cleaning up any remaining descriptors
1090 */
1091 if (chan->terminating)
1092 break;
1093 }
1094
1095 spin_unlock_irqrestore(&chan->lock, flags);
1096 }
1097
1098 /**
1099 * xilinx_dma_do_tasklet - Schedule completion tasklet
1100 * @t: Pointer to the Xilinx DMA channel structure
1101 */
1102 static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1103 {
1104 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1105
1106 xilinx_dma_chan_desc_cleanup(chan);
1107 }
1108
1109 /**
1110 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1111 * @dchan: DMA channel
1112 *
1113 * Return: '0' on success and failure value on error
1114 */
1115 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1116 {
1117 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1118 int i;
1119
1120 /* Has this channel already been allocated? */
1121 if (chan->desc_pool)
1122 return 0;
1123
1124 /*
1125 * We need the descriptor to be aligned to 64 bytes
1126 * to meet the Xilinx VDMA specification requirement.
1127 */
1128 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1129 /* Allocate the buffer descriptors. */
1130 chan->seg_v = dma_alloc_coherent(chan->dev,
1131 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1132 &chan->seg_p, GFP_KERNEL);
1133 if (!chan->seg_v) {
1134 dev_err(chan->dev,
1135 "unable to allocate channel %d descriptors\n",
1136 chan->id);
1137 return -ENOMEM;
1138 }
1139 /*
1140 * For cyclic DMA mode we need to program the tail descriptor
1141 * register with a value which is not a part of the BD chain,
1142 * so allocate a desc segment during channel allocation for
1143 * programming the tail descriptor.
1144 */
1145 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1146 sizeof(*chan->cyclic_seg_v),
1147 &chan->cyclic_seg_p,
1148 GFP_KERNEL);
1149 if (!chan->cyclic_seg_v) {
1150 dev_err(chan->dev,
1151 "unable to allocate desc segment for cyclic DMA\n");
1152 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1153 XILINX_DMA_NUM_DESCS, chan->seg_v,
1154 chan->seg_p);
1155 return -ENOMEM;
1156 }
1157 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1158
1159 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1160 chan->seg_v[i].hw.next_desc =
1161 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1162 ((i + 1) % XILINX_DMA_NUM_DESCS));
1163 chan->seg_v[i].hw.next_desc_msb =
1164 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1165 ((i + 1) % XILINX_DMA_NUM_DESCS));
1166 chan->seg_v[i].phys = chan->seg_p +
1167 sizeof(*chan->seg_v) * i;
1168 list_add_tail(&chan->seg_v[i].node,
1169 &chan->free_seg_list);
1170 }
1171 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1172 /* Allocate the buffer descriptors. */
1173 chan->seg_mv = dma_alloc_coherent(chan->dev,
1174 sizeof(*chan->seg_mv) *
1175 XILINX_DMA_NUM_DESCS,
1176 &chan->seg_p, GFP_KERNEL);
1177 if (!chan->seg_mv) {
1178 dev_err(chan->dev,
1179 "unable to allocate channel %d descriptors\n",
1180 chan->id);
1181 return -ENOMEM;
1182 }
1183 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1184 chan->seg_mv[i].hw.next_desc =
1185 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1186 ((i + 1) % XILINX_DMA_NUM_DESCS));
1187 chan->seg_mv[i].hw.next_desc_msb =
1188 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1189 ((i + 1) % XILINX_DMA_NUM_DESCS));
1190 chan->seg_mv[i].phys = chan->seg_p +
1191 sizeof(*chan->seg_mv) * i;
1192 list_add_tail(&chan->seg_mv[i].node,
1193 &chan->free_seg_list);
1194 }
1195 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1196 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1197 chan->dev,
1198 sizeof(struct xilinx_cdma_tx_segment),
1199 __alignof__(struct xilinx_cdma_tx_segment),
1200 0);
1201 } else {
1202 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1203 chan->dev,
1204 sizeof(struct xilinx_vdma_tx_segment),
1205 __alignof__(struct xilinx_vdma_tx_segment),
1206 0);
1207 }
1208
1209 if (!chan->desc_pool &&
1210 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1211 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1212 dev_err(chan->dev,
1213 "unable to allocate channel %d descriptor pool\n",
1214 chan->id);
1215 return -ENOMEM;
1216 }
1217
1218 dma_cookie_init(dchan);
1219
1220 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1221 /* For AXI DMA, resetting one channel will reset the
1222 * other channel as well, so enable the interrupts here.
1223 */
1224 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1225 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1226 }
1227
1228 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1229 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1230 XILINX_CDMA_CR_SGMODE);
1231
1232 return 0;
1233 }
1234
1235 /**
1236 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1237 * @chan: Driver specific DMA channel
1238 * @size: Total data that needs to be copied
1239 * @done: Amount of data that has been already copied
1240 *
1241 * Return: Amount of data that has to be copied
1242 */
1243 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1244 int size, int done)
1245 {
1246 size_t copy;
1247
1248 copy = min_t(size_t, size - done,
1249 chan->xdev->max_buffer_len);
1250
1251 if ((copy + done < size) &&
1252 chan->xdev->common.copy_align) {
1253 /*
1254 * If this is not the last descriptor, make sure
1255 * the next one will be properly aligned
1256 */
1257 copy = rounddown(copy,
1258 (1 << chan->xdev->common.copy_align));
1259 }
1260 return copy;
1261 }
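
/*
 * Worked example (illustrative numbers): with max_buffer_len = 65535 and
 * copy_align = 3, a request of size 100000 starting at done = 0 yields
 * copy = min(100000, 65535) = 65535, rounded down to 65528 so that the next
 * chunk starts on an 8-byte boundary; the remaining 34472 bytes form the
 * last chunk and are not rounded.
 */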
1262
1263 /**
1264 * xilinx_dma_tx_status - Get DMA transaction status
1265 * @dchan: DMA channel
1266 * @cookie: Transaction identifier
1267 * @txstate: Transaction state
1268 *
1269 * Return: DMA transaction status
1270 */
1271 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1272 dma_cookie_t cookie,
1273 struct dma_tx_state *txstate)
1274 {
1275 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1276 struct xilinx_dma_tx_descriptor *desc;
1277 enum dma_status ret;
1278 unsigned long flags;
1279 u32 residue = 0;
1280
1281 ret = dma_cookie_status(dchan, cookie, txstate);
1282 if (ret == DMA_COMPLETE || !txstate)
1283 return ret;
1284
1285 spin_lock_irqsave(&chan->lock, flags);
1286 if (!list_empty(&chan->active_list)) {
1287 desc = list_last_entry(&chan->active_list,
1288 struct xilinx_dma_tx_descriptor, node);
1289 /*
1290 * VDMA and simple mode do not support residue reporting, so the
1291 * residue field will always be 0.
1292 */
1293 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1294 residue = xilinx_dma_get_residue(chan, desc);
1295 }
1296 spin_unlock_irqrestore(&chan->lock, flags);
1297
1298 dma_set_residue(txstate, residue);
1299
1300 return ret;
1301 }
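
/*
 * Clients typically query this through the dmaengine wrappers, e.g.:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status = dmaengine_tx_status(chan, cookie, &state);
 *
 * where state.residue then reflects the value computed above (0 for VDMA
 * and for simple, non-SG transfers).
 */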
1302
1303 /**
1304 * xilinx_dma_stop_transfer - Halt DMA channel
1305 * @chan: Driver specific DMA channel
1306 *
1307 * Return: '0' on success and failure value on error
1308 */
1309 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1310 {
1311 u32 val;
1312
1313 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1314
1315 /* Wait for the hardware to halt */
1316 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1317 val & XILINX_DMA_DMASR_HALTED, 0,
1318 XILINX_DMA_LOOP_COUNT);
1319 }
1320
1321 /**
1322 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1323 * @chan: Driver specific DMA channel
1324 *
1325 * Return: '0' on success and failure value on error
1326 */
1327 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1328 {
1329 u32 val;
1330
1331 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1332 val & XILINX_DMA_DMASR_IDLE, 0,
1333 XILINX_DMA_LOOP_COUNT);
1334 }
1335
1336 /**
1337 * xilinx_dma_start - Start DMA channel
1338 * @chan: Driver specific DMA channel
1339 */
1340 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1341 {
1342 int err;
1343 u32 val;
1344
1345 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1346
1347 /* Wait for the hardware to start */
1348 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1349 !(val & XILINX_DMA_DMASR_HALTED), 0,
1350 XILINX_DMA_LOOP_COUNT);
1351
1352 if (err) {
1353 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1354 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1355
1356 chan->err = true;
1357 }
1358 }
1359
1360 /**
1361 * xilinx_vdma_start_transfer - Starts VDMA transfer
1362 * @chan: Driver specific channel struct pointer
1363 */
1364 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1365 {
1366 struct xilinx_vdma_config *config = &chan->config;
1367 struct xilinx_dma_tx_descriptor *desc;
1368 u32 reg, j;
1369 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1370 int i = 0;
1371
1372 /* This function was invoked with lock held */
1373 if (chan->err)
1374 return;
1375
1376 if (!chan->idle)
1377 return;
1378
1379 if (list_empty(&chan->pending_list))
1380 return;
1381
1382 desc = list_first_entry(&chan->pending_list,
1383 struct xilinx_dma_tx_descriptor, node);
1384
1385 /* Configure the hardware using info in the config structure */
1386 if (chan->has_vflip) {
1387 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1388 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1389 reg |= config->vflip_en;
1390 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1391 reg);
1392 }
1393
1394 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1395
1396 if (config->frm_cnt_en)
1397 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1398 else
1399 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1400
1401 /* If not parking, enable circular mode */
1402 if (config->park)
1403 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1404 else
1405 reg |= XILINX_DMA_DMACR_CIRC_EN;
1406
1407 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1408
1409 if (config->park) {
1410 j = chan->desc_submitcount;
1411 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1412 if (chan->direction == DMA_MEM_TO_DEV) {
1413 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1414 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1415 } else {
1416 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1417 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1418 }
1419 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1420 }
1421
1422 /* Start the hardware */
1423 xilinx_dma_start(chan);
1424
1425 if (chan->err)
1426 return;
1427
1428 /* Start the transfer */
1429 if (chan->desc_submitcount < chan->num_frms)
1430 i = chan->desc_submitcount;
1431
1432 list_for_each_entry(segment, &desc->segments, node) {
1433 if (chan->ext_addr)
1434 vdma_desc_write_64(chan,
1435 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1436 segment->hw.buf_addr,
1437 segment->hw.buf_addr_msb);
1438 else
1439 vdma_desc_write(chan,
1440 XILINX_VDMA_REG_START_ADDRESS(i++),
1441 segment->hw.buf_addr);
1442
1443 last = segment;
1444 }
1445
1446 if (!last)
1447 return;
1448
1449 /* HW expects these parameters to be the same for one transaction */
1450 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1451 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1452 last->hw.stride);
1453 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1454
1455 chan->desc_submitcount++;
1456 chan->desc_pendingcount--;
1457 list_move_tail(&desc->node, &chan->active_list);
1458 if (chan->desc_submitcount == chan->num_frms)
1459 chan->desc_submitcount = 0;
1460
1461 chan->idle = false;
1462 }
1463
1464 /**
1465 * xilinx_cdma_start_transfer - Starts cdma transfer
1466 * @chan: Driver specific channel struct pointer
1467 */
1468 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1469 {
1470 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1471 struct xilinx_cdma_tx_segment *tail_segment;
1472 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1473
1474 if (chan->err)
1475 return;
1476
1477 if (!chan->idle)
1478 return;
1479
1480 if (list_empty(&chan->pending_list))
1481 return;
1482
1483 head_desc = list_first_entry(&chan->pending_list,
1484 struct xilinx_dma_tx_descriptor, node);
1485 tail_desc = list_last_entry(&chan->pending_list,
1486 struct xilinx_dma_tx_descriptor, node);
1487 tail_segment = list_last_entry(&tail_desc->segments,
1488 struct xilinx_cdma_tx_segment, node);
1489
1490 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1491 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1492 ctrl_reg |= chan->desc_pendingcount <<
1493 XILINX_DMA_CR_COALESCE_SHIFT;
1494 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1495 }
1496
1497 if (chan->has_sg) {
1498 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1499 XILINX_CDMA_CR_SGMODE);
1500
1501 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1502 XILINX_CDMA_CR_SGMODE);
1503
1504 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1505 head_desc->async_tx.phys);
1506
1507 /* Update tail ptr register which will start the transfer */
1508 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1509 tail_segment->phys);
1510 } else {
1511 /* In simple mode */
1512 struct xilinx_cdma_tx_segment *segment;
1513 struct xilinx_cdma_desc_hw *hw;
1514
1515 segment = list_first_entry(&head_desc->segments,
1516 struct xilinx_cdma_tx_segment,
1517 node);
1518
1519 hw = &segment->hw;
1520
1521 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1522 xilinx_prep_dma_addr_t(hw->src_addr));
1523 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1524 xilinx_prep_dma_addr_t(hw->dest_addr));
1525
1526 /* Start the transfer */
1527 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1528 hw->control & chan->xdev->max_buffer_len);
1529 }
1530
1531 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1532 chan->desc_pendingcount = 0;
1533 chan->idle = false;
1534 }
1535
1536 /**
1537 * xilinx_dma_start_transfer - Starts DMA transfer
1538 * @chan: Driver specific channel struct pointer
1539 */
1540 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1541 {
1542 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1543 struct xilinx_axidma_tx_segment *tail_segment;
1544 u32 reg;
1545
1546 if (chan->err)
1547 return;
1548
1549 if (list_empty(&chan->pending_list))
1550 return;
1551
1552 if (!chan->idle)
1553 return;
1554
1555 head_desc = list_first_entry(&chan->pending_list,
1556 struct xilinx_dma_tx_descriptor, node);
1557 tail_desc = list_last_entry(&chan->pending_list,
1558 struct xilinx_dma_tx_descriptor, node);
1559 tail_segment = list_last_entry(&tail_desc->segments,
1560 struct xilinx_axidma_tx_segment, node);
1561
1562 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1563
1564 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1565 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1566 reg |= chan->desc_pendingcount <<
1567 XILINX_DMA_CR_COALESCE_SHIFT;
1568 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1569 }
1570
1571 if (chan->has_sg)
1572 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1573 head_desc->async_tx.phys);
1574 reg &= ~XILINX_DMA_CR_DELAY_MAX;
1575 reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
1576 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1577
1578 xilinx_dma_start(chan);
1579
1580 if (chan->err)
1581 return;
1582
1583 /* Start the transfer */
1584 if (chan->has_sg) {
1585 if (chan->cyclic)
1586 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1587 chan->cyclic_seg_v->phys);
1588 else
1589 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1590 tail_segment->phys);
1591 } else {
1592 struct xilinx_axidma_tx_segment *segment;
1593 struct xilinx_axidma_desc_hw *hw;
1594
1595 segment = list_first_entry(&head_desc->segments,
1596 struct xilinx_axidma_tx_segment,
1597 node);
1598 hw = &segment->hw;
1599
1600 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1601 xilinx_prep_dma_addr_t(hw->buf_addr));
1602
1603 /* Start the transfer */
1604 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1605 hw->control & chan->xdev->max_buffer_len);
1606 }
1607
1608 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1609 chan->desc_pendingcount = 0;
1610 chan->idle = false;
1611 }
1612
1613 /**
1614 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1615 * @chan: Driver specific channel struct pointer
1616 */
1617 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1618 {
1619 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1620 struct xilinx_aximcdma_tx_segment *tail_segment;
1621 u32 reg;
1622
1623 /*
1624 * The lock is already held by the calling function, so we don't
1625 * need to take it here again.
1626 */
1627
1628 if (chan->err)
1629 return;
1630
1631 if (!chan->idle)
1632 return;
1633
1634 if (list_empty(&chan->pending_list))
1635 return;
1636
1637 head_desc = list_first_entry(&chan->pending_list,
1638 struct xilinx_dma_tx_descriptor, node);
1639 tail_desc = list_last_entry(&chan->pending_list,
1640 struct xilinx_dma_tx_descriptor, node);
1641 tail_segment = list_last_entry(&tail_desc->segments,
1642 struct xilinx_aximcdma_tx_segment, node);
1643
1644 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1645
1646 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1647 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1648 reg |= chan->desc_pendingcount <<
1649 XILINX_MCDMA_COALESCE_SHIFT;
1650 }
1651
1652 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1653 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1654
1655 /* Program current descriptor */
1656 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1657 head_desc->async_tx.phys);
1658
1659 /* Program channel enable register */
1660 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1661 reg |= BIT(chan->tdest);
1662 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1663
1664 /* Start the fetch of BDs for the channel */
1665 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1666 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1667 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1668
1669 xilinx_dma_start(chan);
1670
1671 if (chan->err)
1672 return;
1673
1674 /* Start the transfer */
1675 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1676 tail_segment->phys);
1677
1678 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1679 chan->desc_pendingcount = 0;
1680 chan->idle = false;
1681 }
1682
1683 /**
1684 * xilinx_dma_issue_pending - Issue pending transactions
1685 * @dchan: DMA channel
1686 */
1687 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1688 {
1689 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1690 unsigned long flags;
1691
1692 spin_lock_irqsave(&chan->lock, flags);
1693 chan->start_transfer(chan);
1694 spin_unlock_irqrestore(&chan->lock, flags);
1695 }
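/*
 * Illustrative client-side sketch (not part of this driver): a consumer
 * typically requests a channel, prepares a descriptor, submits it and then
 * calls dma_async_issue_pending(), which lands in the routine above and
 * kicks chan->start_transfer(). The channel name "rx", buf_dma and len are
 * placeholders; error handling is trimmed for brevity.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		goto err;
 *	dma_async_issue_pending(chan);
 */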
1696
1697 /**
1698 * xilinx_dma_device_config - Configure the DMA channel
1699 * @dchan: DMA channel
1700 * @config: channel configuration
1701 *
1702 * Return: 0 always.
1703 */
1704 static int xilinx_dma_device_config(struct dma_chan *dchan,
1705 struct dma_slave_config *config)
1706 {
1707 return 0;
1708 }
1709
1710 /**
1711 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1712 * @chan: Xilinx DMA channel
1713 *
1714 * CONTEXT: hardirq
1715 */
1716 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1717 {
1718 struct xilinx_dma_tx_descriptor *desc, *next;
1719
1720 /* This function was invoked with lock held */
1721 if (list_empty(&chan->active_list))
1722 return;
1723
1724 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1725 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1726 struct xilinx_axidma_tx_segment *seg;
1727
1728 seg = list_last_entry(&desc->segments,
1729 struct xilinx_axidma_tx_segment, node);
1730 if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
1731 break;
1732 }
1733 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1734 XDMA_TYPE_VDMA)
1735 desc->residue = xilinx_dma_get_residue(chan, desc);
1736 else
1737 desc->residue = 0;
1738 desc->err = chan->err;
1739
1740 list_del(&desc->node);
1741 if (!desc->cyclic)
1742 dma_cookie_complete(&desc->async_tx);
1743 list_add_tail(&desc->node, &chan->done_list);
1744 }
1745 }
1746
1747 /**
1748 * xilinx_dma_reset - Reset DMA channel
1749 * @chan: Driver specific DMA channel
1750 *
1751 * Return: '0' on success and failure value on error
1752 */
1753 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1754 {
1755 int err;
1756 u32 tmp;
1757
1758 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1759
1760 /* Wait for the hardware to finish reset */
1761 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1762 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1763 XILINX_DMA_LOOP_COUNT);
1764
1765 if (err) {
1766 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1767 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1768 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1769 return -ETIMEDOUT;
1770 }
1771
1772 chan->err = false;
1773 chan->idle = true;
1774 chan->desc_pendingcount = 0;
1775 chan->desc_submitcount = 0;
1776
1777 return err;
1778 }
1779
1780 /**
1781 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1782 * @chan: Driver specific DMA channel
1783 *
1784 * Return: '0' on success and failure value on error
1785 */
1786 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1787 {
1788 int err;
1789
1790 /* Reset VDMA */
1791 err = xilinx_dma_reset(chan);
1792 if (err)
1793 return err;
1794
1795 /* Enable interrupts */
1796 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1797 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1798
1799 return 0;
1800 }
1801
1802 /**
1803 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1804 * @irq: IRQ number
1805 * @data: Pointer to the Xilinx MCDMA channel structure
1806 *
1807 * Return: IRQ_HANDLED/IRQ_NONE
1808 */
1809 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1810 {
1811 struct xilinx_dma_chan *chan = data;
1812 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1813
1814 if (chan->direction == DMA_DEV_TO_MEM)
1815 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1816 else
1817 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1818
1819 /* Read the channel ID that raised the interrupt */
1820 chan_sermask = dma_ctrl_read(chan, ser_offset);
1821 chan_id = ffs(chan_sermask);
1822
1823 if (!chan_id)
1824 return IRQ_NONE;
1825
1826 if (chan->direction == DMA_DEV_TO_MEM)
1827 chan_offset = chan->xdev->dma_config->max_channels / 2;
1828
1829 chan_offset = chan_offset + (chan_id - 1);
1830 chan = chan->xdev->chan[chan_offset];
1831 /* Read the status and ack the interrupts. */
1832 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1833 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1834 return IRQ_NONE;
1835
1836 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1837 status & XILINX_MCDMA_IRQ_ALL_MASK);
1838
1839 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1840 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1841 chan,
1842 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1843 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1844 (chan->tdest)),
1845 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1846 (chan->tdest)));
1847 chan->err = true;
1848 }
1849
1850 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1851 /*
1852 * The device is taking too long to complete the transfer for the
1853 * responsiveness the user requires.
1854 */
1855 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1856 }
1857
1858 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1859 spin_lock(&chan->lock);
1860 xilinx_dma_complete_descriptor(chan);
1861 chan->idle = true;
1862 chan->start_transfer(chan);
1863 spin_unlock(&chan->lock);
1864 }
1865
1866 tasklet_hi_schedule(&chan->tasklet);
1867 return IRQ_HANDLED;
1868 }
1869
1870 /**
1871 * xilinx_dma_irq_handler - DMA Interrupt handler
1872 * @irq: IRQ number
1873 * @data: Pointer to the Xilinx DMA channel structure
1874 *
1875 * Return: IRQ_HANDLED/IRQ_NONE
1876 */
1877 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1878 {
1879 struct xilinx_dma_chan *chan = data;
1880 u32 status;
1881
1882 /* Read the status and ack the interrupts. */
1883 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1884 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1885 return IRQ_NONE;
1886
1887 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1888 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1889
1890 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1891 /*
1892 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1893 * error is recoverable, ignore it. Otherwise flag the error.
1894 *
1895 * Only recoverable errors can be cleared in the DMASR register;
1896 * make sure not to write other error bits to 1.
1897 */
1898 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1899
1900 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1901 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1902
1903 if (!chan->flush_on_fsync ||
1904 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1905 dev_err(chan->dev,
1906 "Channel %p has errors %x, cdr %x tdr %x\n",
1907 chan, errors,
1908 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1909 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1910 chan->err = true;
1911 }
1912 }
1913
1914 if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
1915 XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
1916 spin_lock(&chan->lock);
1917 xilinx_dma_complete_descriptor(chan);
1918 chan->idle = true;
1919 chan->start_transfer(chan);
1920 spin_unlock(&chan->lock);
1921 }
1922
1923 tasklet_schedule(&chan->tasklet);
1924 return IRQ_HANDLED;
1925 }
1926
1927 /**
1928 * append_desc_queue - Queue a descriptor on the channel's pending list
1929 * @chan: Driver specific dma channel
1930 * @desc: dma transaction descriptor
1931 */
1932 static void append_desc_queue(struct xilinx_dma_chan *chan,
1933 struct xilinx_dma_tx_descriptor *desc)
1934 {
1935 struct xilinx_vdma_tx_segment *tail_segment;
1936 struct xilinx_dma_tx_descriptor *tail_desc;
1937 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1938 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1939 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1940
1941 if (list_empty(&chan->pending_list))
1942 goto append;
1943
1944 /*
1945 * Add the hardware descriptor to the chain of hardware descriptors
1946 * that already exists in memory.
1947 */
1948 tail_desc = list_last_entry(&chan->pending_list,
1949 struct xilinx_dma_tx_descriptor, node);
1950 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1951 tail_segment = list_last_entry(&tail_desc->segments,
1952 struct xilinx_vdma_tx_segment,
1953 node);
1954 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1955 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1956 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1957 struct xilinx_cdma_tx_segment,
1958 node);
1959 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1960 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1961 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1962 struct xilinx_axidma_tx_segment,
1963 node);
1964 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1965 } else {
1966 aximcdma_tail_segment =
1967 list_last_entry(&tail_desc->segments,
1968 struct xilinx_aximcdma_tx_segment,
1969 node);
1970 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1971 }
1972
1973 /*
1974 * Add the software descriptor and all children to the list
1975 * of pending transactions
1976 */
1977 append:
1978 list_add_tail(&desc->node, &chan->pending_list);
1979 chan->desc_pendingcount++;
1980
1981 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1982 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1983 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1984 chan->desc_pendingcount = chan->num_frms;
1985 }
1986 }
1987
1988 /**
1989 * xilinx_dma_tx_submit - Submit DMA transaction
1990 * @tx: Async transaction descriptor
1991 *
1992 * Return: cookie value on success and failure value on error
1993 */
1994 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1995 {
1996 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1997 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1998 dma_cookie_t cookie;
1999 unsigned long flags;
2000 int err;
2001
2002 if (chan->cyclic) {
2003 xilinx_dma_free_tx_descriptor(chan, desc);
2004 return -EBUSY;
2005 }
2006
2007 if (chan->err) {
2008 /*
2009 * If the reset fails, the system needs a hard reset; the
2010 * channel is no longer functional.
2011 */
2012 err = xilinx_dma_chan_reset(chan);
2013 if (err < 0)
2014 return err;
2015 }
2016
2017 spin_lock_irqsave(&chan->lock, flags);
2018
2019 cookie = dma_cookie_assign(tx);
2020
2021 /* Put this transaction onto the tail of the pending queue */
2022 append_desc_queue(chan, desc);
2023
2024 if (desc->cyclic)
2025 chan->cyclic = true;
2026
2027 chan->terminating = false;
2028
2029 spin_unlock_irqrestore(&chan->lock, flags);
2030
2031 return cookie;
2032 }
2033
2034 /**
2035 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
2036 * DMA_SLAVE transaction
2037 * @dchan: DMA channel
2038 * @xt: Interleaved template pointer
2039 * @flags: transfer ack flags
2040 *
2041 * Return: Async transaction descriptor on success and NULL on failure
2042 */
2043 static struct dma_async_tx_descriptor *
2044 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
2045 struct dma_interleaved_template *xt,
2046 unsigned long flags)
2047 {
2048 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2049 struct xilinx_dma_tx_descriptor *desc;
2050 struct xilinx_vdma_tx_segment *segment;
2051 struct xilinx_vdma_desc_hw *hw;
2052
2053 if (!is_slave_direction(xt->dir))
2054 return NULL;
2055
2056 if (!xt->numf || !xt->sgl[0].size)
2057 return NULL;
2058
2059 if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
2060 xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
2061 return NULL;
2062
2063 if (xt->frame_size != 1)
2064 return NULL;
2065
2066 /* Allocate a transaction descriptor. */
2067 desc = xilinx_dma_alloc_tx_descriptor(chan);
2068 if (!desc)
2069 return NULL;
2070
2071 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2072 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2073 async_tx_ack(&desc->async_tx);
2074
2075 /* Allocate the link descriptor from DMA pool */
2076 segment = xilinx_vdma_alloc_tx_segment(chan);
2077 if (!segment)
2078 goto error;
2079
2080 /* Fill in the hardware descriptor */
2081 hw = &segment->hw;
2082 hw->vsize = xt->numf;
2083 hw->hsize = xt->sgl[0].size;
2084 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2085 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2086 hw->stride |= chan->config.frm_dly <<
2087 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2088
2089 if (xt->dir != DMA_MEM_TO_DEV) {
2090 if (chan->ext_addr) {
2091 hw->buf_addr = lower_32_bits(xt->dst_start);
2092 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2093 } else {
2094 hw->buf_addr = xt->dst_start;
2095 }
2096 } else {
2097 if (chan->ext_addr) {
2098 hw->buf_addr = lower_32_bits(xt->src_start);
2099 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2100 } else {
2101 hw->buf_addr = xt->src_start;
2102 }
2103 }
2104
2105 /* Insert the segment into the descriptor segments list. */
2106 list_add_tail(&segment->node, &desc->segments);
2107
2108 /* Link the last hardware descriptor with the first. */
2109 segment = list_first_entry(&desc->segments,
2110 struct xilinx_vdma_tx_segment, node);
2111 desc->async_tx.phys = segment->phys;
2112
2113 return &desc->async_tx;
2114
2115 error:
2116 xilinx_dma_free_tx_descriptor(chan, desc);
2117 return NULL;
2118 }
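/*
 * Illustrative sketch of how a video client might reach the prep routine
 * above through the generic dmaengine API, assuming one contiguous chunk
 * per line (frame_size == 1, as required above). height, width, bpp,
 * stride and frame_dma are placeholders; error handling is trimmed.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_dma;
 *	xt->src_inc = true;
 *	xt->src_sgl = true;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width * bpp;
 *	xt->sgl[0].icg = stride - width * bpp;
 *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	kfree(xt);
 *	dma_async_issue_pending(chan);
 */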
2119
2120 /**
2121 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2122 * @dchan: DMA channel
2123 * @dma_dst: destination address
2124 * @dma_src: source address
2125 * @len: transfer length
2126 * @flags: transfer ack flags
2127 *
2128 * Return: Async transaction descriptor on success and NULL on failure
2129 */
2130 static struct dma_async_tx_descriptor *
2131 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2132 dma_addr_t dma_src, size_t len, unsigned long flags)
2133 {
2134 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2135 struct xilinx_dma_tx_descriptor *desc;
2136 struct xilinx_cdma_tx_segment *segment;
2137 struct xilinx_cdma_desc_hw *hw;
2138
2139 if (!len || len > chan->xdev->max_buffer_len)
2140 return NULL;
2141
2142 desc = xilinx_dma_alloc_tx_descriptor(chan);
2143 if (!desc)
2144 return NULL;
2145
2146 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2147 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2148
2149 /* Allocate the link descriptor from DMA pool */
2150 segment = xilinx_cdma_alloc_tx_segment(chan);
2151 if (!segment)
2152 goto error;
2153
2154 hw = &segment->hw;
2155 hw->control = len;
2156 hw->src_addr = dma_src;
2157 hw->dest_addr = dma_dst;
2158 if (chan->ext_addr) {
2159 hw->src_addr_msb = upper_32_bits(dma_src);
2160 hw->dest_addr_msb = upper_32_bits(dma_dst);
2161 }
2162
2163 /* Insert the segment into the descriptor segments list. */
2164 list_add_tail(&segment->node, &desc->segments);
2165
2166 desc->async_tx.phys = segment->phys;
2167 hw->next_desc = segment->phys;
2168
2169 return &desc->async_tx;
2170
2171 error:
2172 xilinx_dma_free_tx_descriptor(chan, desc);
2173 return NULL;
2174 }
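/*
 * Illustrative sketch of a CDMA copy through the generic memcpy API.
 * dst_dma, src_dma and len are placeholders, and the completion callback
 * and its context are hypothetical names, not part of this driver.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_copy_done;
 *	txd->callback_param = my_ctx;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */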
2175
2176 /**
2177 * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
2178 * transaction from DMA vectors
2179 * @dchan: DMA channel
2180 * @vecs: Array of DMA vectors that should be transferred
2181 * @nb: number of entries in @vecs
2182 * @direction: DMA direction
2183 * @flags: transfer ack flags
2184 *
2185 * Return: Async transaction descriptor on success and NULL on failure
2186 */
2187 static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
2188 struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
2189 enum dma_transfer_direction direction, unsigned long flags)
2190 {
2191 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2192 struct xilinx_dma_tx_descriptor *desc;
2193 struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
2194 size_t copy;
2195 size_t sg_used;
2196 unsigned int i;
2197
2198 if (!is_slave_direction(direction) || direction != chan->direction)
2199 return NULL;
2200
2201 desc = xilinx_dma_alloc_tx_descriptor(chan);
2202 if (!desc)
2203 return NULL;
2204
2205 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2206 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2207
2208 /* Build transactions using information from DMA vectors */
2209 for (i = 0; i < nb; i++) {
2210 sg_used = 0;
2211
2212 /* Loop until the entire dma_vec entry is used */
2213 while (sg_used < vecs[i].len) {
2214 struct xilinx_axidma_desc_hw *hw;
2215
2216 /* Get a free segment */
2217 segment = xilinx_axidma_alloc_tx_segment(chan);
2218 if (!segment)
2219 goto error;
2220
2221 /*
2222 * Calculate the maximum number of bytes to transfer,
2223 * making sure it is less than the hw limit
2224 */
2225 copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
2226 sg_used);
2227 hw = &segment->hw;
2228
2229 /* Fill in the descriptor */
2230 xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
2231 hw->control = copy;
2232
2233 if (prev)
2234 prev->hw.next_desc = segment->phys;
2235
2236 prev = segment;
2237 sg_used += copy;
2238
2239 /*
2240 * Insert the segment into the descriptor segments
2241 * list.
2242 */
2243 list_add_tail(&segment->node, &desc->segments);
2244 }
2245 }
2246
2247 head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
2248 desc->async_tx.phys = head->phys;
2249
2250 /* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
2251 if (chan->direction == DMA_MEM_TO_DEV) {
2252 head->hw.control |= XILINX_DMA_BD_SOP;
2253 segment = list_last_entry(&desc->segments,
2254 struct xilinx_axidma_tx_segment,
2255 node);
2256 segment->hw.control |= XILINX_DMA_BD_EOP;
2257 }
2258
2259 if (chan->xdev->has_axistream_connected)
2260 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
2261
2262 return &desc->async_tx;
2263
2264 error:
2265 xilinx_dma_free_tx_descriptor(chan, desc);
2266 return NULL;
2267 }
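/*
 * Illustrative sketch, assuming the framework exposes the prep op above
 * through dmaengine_prep_peripheral_dma_vec() as on recent kernels. The
 * two buffers are placeholders and must already be DMA-mapped in the
 * channel's direction.
 *
 *	struct dma_vec vecs[] = {
 *		{ .addr = buf0_dma, .len = len0 },
 *		{ .addr = buf1_dma, .len = len1 },
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
 *						DMA_MEM_TO_DEV,
 *						DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */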
2268
2269 /**
2270 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2271 * @dchan: DMA channel
2272 * @sgl: scatterlist to transfer to/from
2273 * @sg_len: number of entries in @sgl
2274 * @direction: DMA direction
2275 * @flags: transfer ack flags
2276 * @context: APP words of the descriptor
2277 *
2278 * Return: Async transaction descriptor on success and NULL on failure
2279 */
2280 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2281 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2282 enum dma_transfer_direction direction, unsigned long flags,
2283 void *context)
2284 {
2285 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2286 struct xilinx_dma_tx_descriptor *desc;
2287 struct xilinx_axidma_tx_segment *segment = NULL;
2288 u32 *app_w = (u32 *)context;
2289 struct scatterlist *sg;
2290 size_t copy;
2291 size_t sg_used;
2292 unsigned int i;
2293
2294 if (!is_slave_direction(direction))
2295 return NULL;
2296
2297 /* Allocate a transaction descriptor. */
2298 desc = xilinx_dma_alloc_tx_descriptor(chan);
2299 if (!desc)
2300 return NULL;
2301
2302 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2303 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2304
2305 /* Build transactions using information in the scatter gather list */
2306 for_each_sg(sgl, sg, sg_len, i) {
2307 sg_used = 0;
2308
2309 /* Loop until the entire scatterlist entry is used */
2310 while (sg_used < sg_dma_len(sg)) {
2311 struct xilinx_axidma_desc_hw *hw;
2312
2313 /* Get a free segment */
2314 segment = xilinx_axidma_alloc_tx_segment(chan);
2315 if (!segment)
2316 goto error;
2317
2318 /*
2319 * Calculate the maximum number of bytes to transfer,
2320 * making sure it is less than the hw limit
2321 */
2322 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2323 sg_used);
2324 hw = &segment->hw;
2325
2326 /* Fill in the descriptor */
2327 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2328 sg_used, 0);
2329
2330 hw->control = copy;
2331
2332 if (chan->direction == DMA_MEM_TO_DEV) {
2333 if (app_w)
2334 memcpy(hw->app, app_w, sizeof(u32) *
2335 XILINX_DMA_NUM_APP_WORDS);
2336 }
2337
2338 sg_used += copy;
2339
2340 /*
2341 * Insert the segment into the descriptor segments
2342 * list.
2343 */
2344 list_add_tail(&segment->node, &desc->segments);
2345 }
2346 }
2347
2348 segment = list_first_entry(&desc->segments,
2349 struct xilinx_axidma_tx_segment, node);
2350 desc->async_tx.phys = segment->phys;
2351
2352 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2353 if (chan->direction == DMA_MEM_TO_DEV) {
2354 segment->hw.control |= XILINX_DMA_BD_SOP;
2355 segment = list_last_entry(&desc->segments,
2356 struct xilinx_axidma_tx_segment,
2357 node);
2358 segment->hw.control |= XILINX_DMA_BD_EOP;
2359 }
2360
2361 if (chan->xdev->has_axistream_connected)
2362 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
2363
2364 return &desc->async_tx;
2365
2366 error:
2367 xilinx_dma_free_tx_descriptor(chan, desc);
2368 return NULL;
2369 }
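/*
 * Illustrative sketch of a scatter-gather transmit through the generic
 * API. sgt is a placeholder sg_table the client has already built, and
 * dmadev is the DMA-capable struct device; error handling is trimmed.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int nents;
 *
 *	nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */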
2370
2371 /**
2372 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
2373 * @dchan: DMA channel
2374 * @buf_addr: Physical address of the buffer
2375 * @buf_len: Total length of the cyclic buffers
2376 * @period_len: length of individual cyclic buffer
2377 * @direction: DMA direction
2378 * @flags: transfer ack flags
2379 *
2380 * Return: Async transaction descriptor on success and NULL on failure
2381 */
2382 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2383 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2384 size_t period_len, enum dma_transfer_direction direction,
2385 unsigned long flags)
2386 {
2387 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2388 struct xilinx_dma_tx_descriptor *desc;
2389 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2390 size_t copy, sg_used;
2391 unsigned int num_periods;
2392 int i;
2393 u32 reg;
2394
2395 if (!period_len)
2396 return NULL;
2397
2398 num_periods = buf_len / period_len;
2399
2400 if (!num_periods)
2401 return NULL;
2402
2403 if (!is_slave_direction(direction))
2404 return NULL;
2405
2406 /* Allocate a transaction descriptor. */
2407 desc = xilinx_dma_alloc_tx_descriptor(chan);
2408 if (!desc)
2409 return NULL;
2410
2411 chan->direction = direction;
2412 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2413 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2414
2415 for (i = 0; i < num_periods; ++i) {
2416 sg_used = 0;
2417
2418 while (sg_used < period_len) {
2419 struct xilinx_axidma_desc_hw *hw;
2420
2421 /* Get a free segment */
2422 segment = xilinx_axidma_alloc_tx_segment(chan);
2423 if (!segment)
2424 goto error;
2425
2426 /*
2427 * Calculate the maximum number of bytes to transfer,
2428 * making sure it is less than the hw limit
2429 */
2430 copy = xilinx_dma_calc_copysize(chan, period_len,
2431 sg_used);
2432 hw = &segment->hw;
2433 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2434 period_len * i);
2435 hw->control = copy;
2436
2437 if (prev)
2438 prev->hw.next_desc = segment->phys;
2439
2440 prev = segment;
2441 sg_used += copy;
2442
2443 /*
2444 * Insert the segment into the descriptor segments
2445 * list.
2446 */
2447 list_add_tail(&segment->node, &desc->segments);
2448 }
2449 }
2450
2451 head_segment = list_first_entry(&desc->segments,
2452 struct xilinx_axidma_tx_segment, node);
2453 desc->async_tx.phys = head_segment->phys;
2454
2455 desc->cyclic = true;
2456 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2457 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2458 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2459
2460 segment = list_last_entry(&desc->segments,
2461 struct xilinx_axidma_tx_segment,
2462 node);
2463 segment->hw.next_desc = (u32) head_segment->phys;
2464
2465 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2466 if (direction == DMA_MEM_TO_DEV) {
2467 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2468 segment->hw.control |= XILINX_DMA_BD_EOP;
2469 }
2470
2471 return &desc->async_tx;
2472
2473 error:
2474 xilinx_dma_free_tx_descriptor(chan, desc);
2475 return NULL;
2476 }
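/*
 * Illustrative sketch of a cyclic receive, e.g. for a continuously
 * streaming peripheral. ring_dma, ring_len and period_len are
 * placeholders; the hypothetical callback runs once per completed period.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_len,
 *					period_len, DMA_DEV_TO_MEM,
 *					DMA_PREP_INTERRUPT);
 *	txd->callback = my_period_done;
 *	txd->callback_param = my_ctx;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */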
2477
2478 /**
2479 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2480 * @dchan: DMA channel
2481 * @sgl: scatterlist to transfer to/from
2482 * @sg_len: number of entries in @sgl
2483 * @direction: DMA direction
2484 * @flags: transfer ack flags
2485 * @context: APP words of the descriptor
2486 *
2487 * Return: Async transaction descriptor on success and NULL on failure
2488 */
2489 static struct dma_async_tx_descriptor *
2490 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2491 unsigned int sg_len,
2492 enum dma_transfer_direction direction,
2493 unsigned long flags, void *context)
2494 {
2495 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2496 struct xilinx_dma_tx_descriptor *desc;
2497 struct xilinx_aximcdma_tx_segment *segment = NULL;
2498 u32 *app_w = (u32 *)context;
2499 struct scatterlist *sg;
2500 size_t copy;
2501 size_t sg_used;
2502 unsigned int i;
2503
2504 if (!is_slave_direction(direction))
2505 return NULL;
2506
2507 /* Allocate a transaction descriptor. */
2508 desc = xilinx_dma_alloc_tx_descriptor(chan);
2509 if (!desc)
2510 return NULL;
2511
2512 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2513 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2514
2515 /* Build transactions using information in the scatter gather list */
2516 for_each_sg(sgl, sg, sg_len, i) {
2517 sg_used = 0;
2518
2519 /* Loop until the entire scatterlist entry is used */
2520 while (sg_used < sg_dma_len(sg)) {
2521 struct xilinx_aximcdma_desc_hw *hw;
2522
2523 /* Get a free segment */
2524 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2525 if (!segment)
2526 goto error;
2527
2528 /*
2529 * Calculate the maximum number of bytes to transfer,
2530 * making sure it is less than the hw limit
2531 */
2532 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2533 chan->xdev->max_buffer_len);
2534 hw = &segment->hw;
2535
2536 /* Fill in the descriptor */
2537 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2538 sg_used);
2539 hw->control = copy;
2540
2541 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2542 memcpy(hw->app, app_w, sizeof(u32) *
2543 XILINX_DMA_NUM_APP_WORDS);
2544 }
2545
2546 sg_used += copy;
2547 /*
2548 * Insert the segment into the descriptor segments
2549 * list.
2550 */
2551 list_add_tail(&segment->node, &desc->segments);
2552 }
2553 }
2554
2555 segment = list_first_entry(&desc->segments,
2556 struct xilinx_aximcdma_tx_segment, node);
2557 desc->async_tx.phys = segment->phys;
2558
2559 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2560 if (chan->direction == DMA_MEM_TO_DEV) {
2561 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2562 segment = list_last_entry(&desc->segments,
2563 struct xilinx_aximcdma_tx_segment,
2564 node);
2565 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2566 }
2567
2568 return &desc->async_tx;
2569
2570 error:
2571 xilinx_dma_free_tx_descriptor(chan, desc);
2572
2573 return NULL;
2574 }
2575
2576 /**
2577 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2578 * @dchan: Driver specific DMA Channel pointer
2579 *
2580 * Return: '0' always.
2581 */
2582 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2583 {
2584 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2585 u32 reg;
2586 int err;
2587
2588 if (!chan->cyclic) {
2589 err = chan->stop_transfer(chan);
2590 if (err) {
2591 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2592 chan, dma_ctrl_read(chan,
2593 XILINX_DMA_REG_DMASR));
2594 chan->err = true;
2595 }
2596 }
2597
2598 xilinx_dma_chan_reset(chan);
2599 /* Remove and free all of the descriptors in the lists */
2600 chan->terminating = true;
2601 xilinx_dma_free_descriptors(chan);
2602 chan->idle = true;
2603
2604 if (chan->cyclic) {
2605 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2606 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2607 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2608 chan->cyclic = false;
2609 }
2610
2611 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2612 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2613 XILINX_CDMA_CR_SGMODE);
2614
2615 return 0;
2616 }
2617
2618 static void xilinx_dma_synchronize(struct dma_chan *dchan)
2619 {
2620 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2621
2622 tasklet_kill(&chan->tasklet);
2623 }
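/*
 * Illustrative teardown sketch: clients normally reach the two routines
 * above through the generic helpers rather than calling them directly.
 *
 *	dmaengine_terminate_async(chan);
 *	...
 *	dmaengine_synchronize(chan);
 *
 * or simply dmaengine_terminate_sync(chan), which combines the two.
 */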
2624
2625 /**
2626 * xilinx_vdma_channel_set_config - Configure VDMA channel
2627 * Run-time configuration for AXI VDMA; it supports:
2628 * . halt the channel
2629 * . configure interrupt coalescing and inter-packet delay threshold
2630 * . start/stop parking
2631 * . enable genlock
2632 *
2633 * @dchan: DMA channel
2634 * @cfg: VDMA device configuration pointer
2635 *
2636 * Return: '0' on success and failure value on error
2637 */
2638 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2639 struct xilinx_vdma_config *cfg)
2640 {
2641 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2642 u32 dmacr;
2643
2644 if (cfg->reset)
2645 return xilinx_dma_chan_reset(chan);
2646
2647 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2648
2649 chan->config.frm_dly = cfg->frm_dly;
2650 chan->config.park = cfg->park;
2651
2652 /* genlock settings */
2653 chan->config.gen_lock = cfg->gen_lock;
2654 chan->config.master = cfg->master;
2655
2656 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2657 if (cfg->gen_lock && chan->genlock) {
2658 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2659 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2660 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2661 }
2662
2663 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2664 chan->config.vflip_en = cfg->vflip_en;
2665
2666 if (cfg->park)
2667 chan->config.park_frm = cfg->park_frm;
2668 else
2669 chan->config.park_frm = -1;
2670
2671 chan->config.coalesc = cfg->coalesc;
2672 chan->config.delay = cfg->delay;
2673
2674 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2675 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2676 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2677 chan->config.coalesc = cfg->coalesc;
2678 }
2679
2680 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2681 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2682 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2683 chan->config.delay = cfg->delay;
2684 }
2685
2686 /* FSync Source selection */
2687 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2688 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2689
2690 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2691
2692 return 0;
2693 }
2694 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
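/*
 * Illustrative sketch of the exported VDMA run-time configuration call.
 * The field values below are arbitrary placeholders; see struct
 * xilinx_vdma_config in include/linux/dma/xilinx_dma.h for the full set.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.gen_lock = 1,
 *		.master = 0,
 *		.coalesc = 1,
 *		.park = 1,
 *		.park_frm = 0,
 *	};
 *	int ret;
 *
 *	ret = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (ret)
 *		dev_warn(dev, "VDMA config failed: %d\n", ret);
 */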
2695
2696 /* -----------------------------------------------------------------------------
2697 * Probe and remove
2698 */
2699
2700 /**
2701 * xilinx_dma_chan_remove - Per Channel remove function
2702 * @chan: Driver specific DMA channel
2703 */
2704 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2705 {
2706 /* Disable all interrupts */
2707 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2708 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2709
2710 if (chan->irq > 0)
2711 free_irq(chan->irq, chan);
2712
2713 tasklet_kill(&chan->tasklet);
2714
2715 list_del(&chan->common.device_node);
2716 }
2717
2718 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2719 struct clk **tx_clk, struct clk **rx_clk,
2720 struct clk **sg_clk, struct clk **tmp_clk)
2721 {
2722 int err;
2723
2724 *tmp_clk = NULL;
2725
2726 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2727 if (IS_ERR(*axi_clk))
2728 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2729
2730 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2731 if (IS_ERR(*tx_clk))
2732 *tx_clk = NULL;
2733
2734 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2735 if (IS_ERR(*rx_clk))
2736 *rx_clk = NULL;
2737
2738 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2739 if (IS_ERR(*sg_clk))
2740 *sg_clk = NULL;
2741
2742 err = clk_prepare_enable(*axi_clk);
2743 if (err) {
2744 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2745 return err;
2746 }
2747
2748 err = clk_prepare_enable(*tx_clk);
2749 if (err) {
2750 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2751 goto err_disable_axiclk;
2752 }
2753
2754 err = clk_prepare_enable(*rx_clk);
2755 if (err) {
2756 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2757 goto err_disable_txclk;
2758 }
2759
2760 err = clk_prepare_enable(*sg_clk);
2761 if (err) {
2762 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2763 goto err_disable_rxclk;
2764 }
2765
2766 return 0;
2767
2768 err_disable_rxclk:
2769 clk_disable_unprepare(*rx_clk);
2770 err_disable_txclk:
2771 clk_disable_unprepare(*tx_clk);
2772 err_disable_axiclk:
2773 clk_disable_unprepare(*axi_clk);
2774
2775 return err;
2776 }
2777
2778 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2779 struct clk **dev_clk, struct clk **tmp_clk,
2780 struct clk **tmp1_clk, struct clk **tmp2_clk)
2781 {
2782 int err;
2783
2784 *tmp_clk = NULL;
2785 *tmp1_clk = NULL;
2786 *tmp2_clk = NULL;
2787
2788 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2789 if (IS_ERR(*axi_clk))
2790 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2791
2792 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2793 if (IS_ERR(*dev_clk))
2794 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2795
2796 err = clk_prepare_enable(*axi_clk);
2797 if (err) {
2798 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2799 return err;
2800 }
2801
2802 err = clk_prepare_enable(*dev_clk);
2803 if (err) {
2804 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2805 goto err_disable_axiclk;
2806 }
2807
2808 return 0;
2809
2810 err_disable_axiclk:
2811 clk_disable_unprepare(*axi_clk);
2812
2813 return err;
2814 }
2815
2816 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2817 struct clk **tx_clk, struct clk **txs_clk,
2818 struct clk **rx_clk, struct clk **rxs_clk)
2819 {
2820 int err;
2821
2822 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2823 if (IS_ERR(*axi_clk))
2824 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2825
2826 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2827 if (IS_ERR(*tx_clk))
2828 *tx_clk = NULL;
2829
2830 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2831 if (IS_ERR(*txs_clk))
2832 *txs_clk = NULL;
2833
2834 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2835 if (IS_ERR(*rx_clk))
2836 *rx_clk = NULL;
2837
2838 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2839 if (IS_ERR(*rxs_clk))
2840 *rxs_clk = NULL;
2841
2842 err = clk_prepare_enable(*axi_clk);
2843 if (err) {
2844 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2845 err);
2846 return err;
2847 }
2848
2849 err = clk_prepare_enable(*tx_clk);
2850 if (err) {
2851 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2852 goto err_disable_axiclk;
2853 }
2854
2855 err = clk_prepare_enable(*txs_clk);
2856 if (err) {
2857 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2858 goto err_disable_txclk;
2859 }
2860
2861 err = clk_prepare_enable(*rx_clk);
2862 if (err) {
2863 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2864 goto err_disable_txsclk;
2865 }
2866
2867 err = clk_prepare_enable(*rxs_clk);
2868 if (err) {
2869 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2870 goto err_disable_rxclk;
2871 }
2872
2873 return 0;
2874
2875 err_disable_rxclk:
2876 clk_disable_unprepare(*rx_clk);
2877 err_disable_txsclk:
2878 clk_disable_unprepare(*txs_clk);
2879 err_disable_txclk:
2880 clk_disable_unprepare(*tx_clk);
2881 err_disable_axiclk:
2882 clk_disable_unprepare(*axi_clk);
2883
2884 return err;
2885 }
2886
2887 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2888 {
2889 clk_disable_unprepare(xdev->rxs_clk);
2890 clk_disable_unprepare(xdev->rx_clk);
2891 clk_disable_unprepare(xdev->txs_clk);
2892 clk_disable_unprepare(xdev->tx_clk);
2893 clk_disable_unprepare(xdev->axi_clk);
2894 }
2895
2896 /**
2897 * xilinx_dma_chan_probe - Per Channel Probing
2898 * It gets the channel features from the device tree entry and
2899 * initializes the channel-specific handling routines.
2900 *
2901 * @xdev: Driver specific device structure
2902 * @node: Device node
2903 *
2904 * Return: '0' on success and failure value on error
2905 */
2906 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2907 struct device_node *node)
2908 {
2909 struct xilinx_dma_chan *chan;
2910 bool has_dre = false;
2911 u32 value, width;
2912 int err;
2913
2914 /* Allocate and initialize the channel structure */
2915 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2916 if (!chan)
2917 return -ENOMEM;
2918
2919 chan->dev = xdev->dev;
2920 chan->xdev = xdev;
2921 chan->desc_pendingcount = 0x0;
2922 chan->ext_addr = xdev->ext_addr;
2923 /*
2924 * This variable ensures that descriptors are not submitted while
2925 * the DMA engine is in progress. It avoids polling a bit in the
2926 * status register to know the DMA state in the driver hot path.
2927 */
2928 chan->idle = true;
2929
2930 spin_lock_init(&chan->lock);
2931 INIT_LIST_HEAD(&chan->pending_list);
2932 INIT_LIST_HEAD(&chan->done_list);
2933 INIT_LIST_HEAD(&chan->active_list);
2934 INIT_LIST_HEAD(&chan->free_seg_list);
2935
2936 /* Retrieve the channel properties from the device tree */
2937 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2938
2939 of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
2940
2941 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2942
2943 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2944 if (err) {
2945 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2946 return err;
2947 }
2948 width = value >> 3; /* Convert bits to bytes */
2949
2950 /* If data width is greater than 8 bytes, DRE is not in hw */
2951 if (width > 8)
2952 has_dre = false;
2953
2954 if (!has_dre)
2955 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2956
2957 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2958 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2959 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2960 chan->direction = DMA_MEM_TO_DEV;
2961 chan->id = xdev->mm2s_chan_id++;
2962 chan->tdest = chan->id;
2963
2964 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2965 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2966 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2967 chan->config.park = 1;
2968
2969 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2970 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2971 chan->flush_on_fsync = true;
2972 }
2973 } else if (of_device_is_compatible(node,
2974 "xlnx,axi-vdma-s2mm-channel") ||
2975 of_device_is_compatible(node,
2976 "xlnx,axi-dma-s2mm-channel")) {
2977 chan->direction = DMA_DEV_TO_MEM;
2978 chan->id = xdev->s2mm_chan_id++;
2979 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2980 chan->has_vflip = of_property_read_bool(node,
2981 "xlnx,enable-vert-flip");
2982 if (chan->has_vflip) {
2983 chan->config.vflip_en = dma_read(chan,
2984 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2985 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2986 }
2987
2988 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2989 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2990 else
2991 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2992
2993 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2994 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2995 chan->config.park = 1;
2996
2997 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2998 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2999 chan->flush_on_fsync = true;
3000 }
3001 } else {
3002 dev_err(xdev->dev, "Invalid channel compatible node\n");
3003 return -EINVAL;
3004 }
3005
3006 xdev->common.directions |= chan->direction;
3007
3008 /* Request the interrupt */
3009 chan->irq = of_irq_get(node, chan->tdest);
3010 if (chan->irq < 0)
3011 return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
3012 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
3013 IRQF_SHARED, "xilinx-dma-controller", chan);
3014 if (err) {
3015 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
3016 return err;
3017 }
3018
3019 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3020 chan->start_transfer = xilinx_dma_start_transfer;
3021 chan->stop_transfer = xilinx_dma_stop_transfer;
3022 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3023 chan->start_transfer = xilinx_mcdma_start_transfer;
3024 chan->stop_transfer = xilinx_dma_stop_transfer;
3025 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3026 chan->start_transfer = xilinx_cdma_start_transfer;
3027 chan->stop_transfer = xilinx_cdma_stop_transfer;
3028 } else {
3029 chan->start_transfer = xilinx_vdma_start_transfer;
3030 chan->stop_transfer = xilinx_dma_stop_transfer;
3031 }
3032
3033 /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
3034 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
3035 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
3036 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
3037 XILINX_DMA_DMASR_SG_MASK)
3038 chan->has_sg = true;
3039 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
3040 str_enabled_disabled(chan->has_sg));
3041 }
3042
3043 /* Initialize the tasklet */
3044 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
3045
3046 /*
3047 * Initialize the DMA channel and add it to the DMA engine channels
3048 * list.
3049 */
3050 chan->common.device = &xdev->common;
3051
3052 list_add_tail(&chan->common.device_node, &xdev->common.channels);
3053 xdev->chan[chan->id] = chan;
3054
3055 /* Reset the channel */
3056 err = xilinx_dma_chan_reset(chan);
3057 if (err < 0) {
3058 dev_err(xdev->dev, "Reset channel failed\n");
3059 return err;
3060 }
3061
3062 return 0;
3063 }
3064
3065 /**
3066 * xilinx_dma_child_probe - Per child node probe
3067 * It gets the number of dma-channels per child node from the
3068 * device tree and initializes all the channels.
3069 *
3070 * @xdev: Driver specific device structure
3071 * @node: Device node
3072 *
3073 * Return: '0' on success and failure value on error.
3074 */
3075 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
3076 struct device_node *node)
3077 {
3078 int ret, i;
3079 u32 nr_channels = 1;
3080
3081 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
3082 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
3083 dev_warn(xdev->dev, "missing dma-channels property\n");
3084
3085 for (i = 0; i < nr_channels; i++) {
3086 ret = xilinx_dma_chan_probe(xdev, node);
3087 if (ret)
3088 return ret;
3089 }
3090
3091 return 0;
3092 }
3093
3094 /**
3095 * of_dma_xilinx_xlate - Translation function
3096 * @dma_spec: Pointer to DMA specifier as found in the device tree
3097 * @ofdma: Pointer to DMA controller data
3098 *
3099 * Return: DMA channel pointer on success and NULL on error
3100 */
3101 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
3102 struct of_dma *ofdma)
3103 {
3104 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
3105 int chan_id = dma_spec->args[0];
3106
3107 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
3108 return NULL;
3109
3110 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
3111 }
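/*
 * Illustrative device tree sketch: the single specifier cell consumed
 * above is the channel index into xdev->chan[]. Node names, the unit
 * address and the client node are placeholders.
 *
 *	axi_dma_0: dma-controller@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		#dma-cells = <1>;
 *		...
 *	};
 *
 *	client {
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */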
3112
3113 static const struct xilinx_dma_config axidma_config = {
3114 .dmatype = XDMA_TYPE_AXIDMA,
3115 .clk_init = axidma_clk_init,
3116 .irq_handler = xilinx_dma_irq_handler,
3117 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3118 };
3119
3120 static const struct xilinx_dma_config aximcdma_config = {
3121 .dmatype = XDMA_TYPE_AXIMCDMA,
3122 .clk_init = axidma_clk_init,
3123 .irq_handler = xilinx_mcdma_irq_handler,
3124 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
3125 };
3126 static const struct xilinx_dma_config axicdma_config = {
3127 .dmatype = XDMA_TYPE_CDMA,
3128 .clk_init = axicdma_clk_init,
3129 .irq_handler = xilinx_dma_irq_handler,
3130 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
3131 };
3132
3133 static const struct xilinx_dma_config axivdma_config = {
3134 .dmatype = XDMA_TYPE_VDMA,
3135 .clk_init = axivdma_clk_init,
3136 .irq_handler = xilinx_dma_irq_handler,
3137 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3138 };
3139
3140 static const struct of_device_id xilinx_dma_of_ids[] = {
3141 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
3142 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
3143 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
3144 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
3145 {}
3146 };
3147 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
3148
3149 /**
3150 * xilinx_dma_probe - Driver probe function
3151 * @pdev: Pointer to the platform_device structure
3152 *
3153 * Return: '0' on success and failure value on error
3154 */
3155 static int xilinx_dma_probe(struct platform_device *pdev)
3156 {
3157 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
3158 struct clk **, struct clk **, struct clk **)
3159 = axivdma_clk_init;
3160 struct device_node *node = pdev->dev.of_node;
3161 struct xilinx_dma_device *xdev;
3162 struct device_node *child, *np = pdev->dev.of_node;
3163 u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
3164 int i, err;
3165
3166 /* Allocate and initialize the DMA engine structure */
3167 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
3168 if (!xdev)
3169 return -ENOMEM;
3170
3171 xdev->dev = &pdev->dev;
3172 if (np) {
3173 const struct of_device_id *match;
3174
3175 match = of_match_node(xilinx_dma_of_ids, np);
3176 if (match && match->data) {
3177 xdev->dma_config = match->data;
3178 clk_init = xdev->dma_config->clk_init;
3179 }
3180 }
3181
3182 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3183 &xdev->rx_clk, &xdev->rxs_clk);
3184 if (err)
3185 return err;
3186
3187 /* Request and map I/O memory */
3188 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3189 if (IS_ERR(xdev->regs)) {
3190 err = PTR_ERR(xdev->regs);
3191 goto disable_clks;
3192 }
3193 /* Retrieve the DMA engine properties from the device tree */
3194 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3195 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3196
3197 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3198 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3199 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3200 &len_width)) {
3201 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3202 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3203 dev_warn(xdev->dev,
3204 "invalid xlnx,sg-length-width property value. Using default width\n");
3205 } else {
3206 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3207 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3208 xdev->max_buffer_len =
3209 GENMASK(len_width - 1, 0);
3210 }
3211 }
3212 }
3213
3214 dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
3215
3216 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3217 xdev->has_axistream_connected =
3218 of_property_read_bool(node, "xlnx,axistream-connected");
3219 }
3220
3221 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3222 err = of_property_read_u32(node, "xlnx,num-fstores",
3223 &num_frames);
3224 if (err < 0) {
3225 dev_err(xdev->dev,
3226 "missing xlnx,num-fstores property\n");
3227 goto disable_clks;
3228 }
3229
3230 err = of_property_read_u32(node, "xlnx,flush-fsync",
3231 &xdev->flush_on_fsync);
3232 if (err < 0)
3233 dev_warn(xdev->dev,
3234 "missing xlnx,flush-fsync property\n");
3235 }
3236
3237 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3238 if (err < 0)
3239 dev_warn(xdev->dev,
3240 "missing xlnx,addrwidth property, using default value %d\n",
3241 XILINX_DMA_DFAULT_ADDRWIDTH);
3242
3243 if (addr_width > 32)
3244 xdev->ext_addr = true;
3245 else
3246 xdev->ext_addr = false;
3247
3248 /* Set metadata mode */
3249 if (xdev->has_axistream_connected)
3250 xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
3251
3252 /* Set the dma mask bits */
3253 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3254 if (err < 0) {
3255 dev_err(xdev->dev, "DMA mask error %d\n", err);
3256 goto disable_clks;
3257 }
3258
3259 /* Initialize the DMA engine */
3260 xdev->common.dev = &pdev->dev;
3261
3262 INIT_LIST_HEAD(&xdev->common.channels);
3263 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
3264 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3265 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3266 }
3267
3268 xdev->common.device_alloc_chan_resources =
3269 xilinx_dma_alloc_chan_resources;
3270 xdev->common.device_free_chan_resources =
3271 xilinx_dma_free_chan_resources;
3272 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3273 xdev->common.device_synchronize = xilinx_dma_synchronize;
3274 xdev->common.device_tx_status = xilinx_dma_tx_status;
3275 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3276 xdev->common.device_config = xilinx_dma_device_config;
3277 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3278 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3279 xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
3280 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3281 xdev->common.device_prep_dma_cyclic =
3282 xilinx_dma_prep_dma_cyclic;
3283 /* Residue calculation is supported by only AXI DMA and CDMA */
3284 xdev->common.residue_granularity =
3285 DMA_RESIDUE_GRANULARITY_SEGMENT;
3286 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3287 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3288 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3289 /* Residue calculation is supported by only AXI DMA and CDMA */
3290 xdev->common.residue_granularity =
3291 DMA_RESIDUE_GRANULARITY_SEGMENT;
3292 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3293 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3294 } else {
3295 xdev->common.device_prep_interleaved_dma =
3296 xilinx_vdma_dma_prep_interleaved;
3297 }
3298
3299 platform_set_drvdata(pdev, xdev);
3300
3301 /* Initialize the channels */
3302 for_each_child_of_node(node, child) {
3303 err = xilinx_dma_child_probe(xdev, child);
3304 if (err < 0) {
3305 of_node_put(child);
3306 goto error;
3307 }
3308 }
3309
3310 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3311 for (i = 0; i < xdev->dma_config->max_channels; i++)
3312 if (xdev->chan[i])
3313 xdev->chan[i]->num_frms = num_frames;
3314 }
3315
3316 /* Register the DMA engine with the core */
3317 err = dma_async_device_register(&xdev->common);
3318 if (err) {
3319 dev_err(xdev->dev, "failed to register the dma device\n");
3320 goto error;
3321 }
3322
3323 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3324 xdev);
3325 if (err < 0) {
3326 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3327 dma_async_device_unregister(&xdev->common);
3328 goto error;
3329 }
3330
3331 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3332 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3333 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3334 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3335 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3336 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3337 else
3338 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3339
3340 return 0;
3341
3342 error:
3343 for (i = 0; i < xdev->dma_config->max_channels; i++)
3344 if (xdev->chan[i])
3345 xilinx_dma_chan_remove(xdev->chan[i]);
3346 disable_clks:
3347 xdma_disable_allclks(xdev);
3348
3349 return err;
3350 }
3351
3352 /**
3353 * xilinx_dma_remove - Driver remove function
3354 * @pdev: Pointer to the platform_device structure
3355 */
3356 static void xilinx_dma_remove(struct platform_device *pdev)
3357 {
3358 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3359 int i;
3360
3361 of_dma_controller_free(pdev->dev.of_node);
3362
3363 dma_async_device_unregister(&xdev->common);
3364
3365 for (i = 0; i < xdev->dma_config->max_channels; i++)
3366 if (xdev->chan[i])
3367 xilinx_dma_chan_remove(xdev->chan[i]);
3368
3369 xdma_disable_allclks(xdev);
3370 }
3371
3372 static struct platform_driver xilinx_vdma_driver = {
3373 .driver = {
3374 .name = "xilinx-vdma",
3375 .of_match_table = xilinx_dma_of_ids,
3376 },
3377 .probe = xilinx_dma_probe,
3378 .remove = xilinx_dma_remove,
3379 };
3380
3381 module_platform_driver(xilinx_vdma_driver);
3382
3383 MODULE_AUTHOR("Xilinx, Inc.");
3384 MODULE_DESCRIPTION("Xilinx VDMA driver");
3385 MODULE_LICENSE("GPL v2");
3386