1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * DMA driver for Xilinx Video DMA Engine
4 *
5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 *
7 * Based on the Freescale DMA driver.
8 *
9 * Description:
10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
11 * core that provides high-bandwidth direct memory access between memory
12 * and AXI4-Stream type video target peripherals. The core provides efficient
13 * two-dimensional DMA operations with independent asynchronous read (MM2S)
14 * and write (S2MM) channel operation. It can be configured to have either
15 * one channel or two channels. If configured as two channels, one is to
16 * transmit to the video device (MM2S) and another is to receive from the
17 * video device (S2MM). Initialization, status, interrupt and management
18 * registers are accessed through an AXI4-Lite slave interface.
19 *
20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
21 * provides high-bandwidth one-dimensional direct memory access between memory
22 * and AXI4-Stream target peripherals. It supports one receive and one
23 * transmit channel, both of them optional at synthesis time.
24 *
25 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
26 * Access (DMA) between a memory-mapped source address and a memory-mapped
27 * destination address.
28 *
29 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
30 * Xilinx IP that provides high-bandwidth direct memory access between
31 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
32 * (SG) interface with independent per-channel configuration support.
33 *
34 */
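/*
 * Client-side usage sketch (illustrative only, not part of this driver):
 * a consumer is expected to go through the generic dmaengine API, roughly:
 *
 *	chan = dma_request_chan(dev, "rx");	// channel name per DT bindings
 *	cfg.direction = DMA_DEV_TO_MEM;
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *	tx->callback = done_cb;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * The channel name, buffer and callback above are placeholders; the real
 * names come from the device tree of the system integrating the IP.
 */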
35
36 #include <linux/bitops.h>
37 #include <linux/dmapool.h>
38 #include <linux/dma/xilinx_dma.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/module.h>
44 #include <linux/of.h>
45 #include <linux/of_dma.h>
46 #include <linux/of_irq.h>
47 #include <linux/platform_device.h>
48 #include <linux/slab.h>
49 #include <linux/string_choices.h>
50 #include <linux/clk.h>
51 #include <linux/io-64-nonatomic-lo-hi.h>
52
53 #include "../dmaengine.h"
54
55 /* Register/Descriptor Offsets */
56 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
57 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
58 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
59 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
60
61 /* Control Registers */
62 #define XILINX_DMA_REG_DMACR 0x0000
63 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
64 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
65 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
66 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
67 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
68 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
69 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
70 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
71 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
72 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
73 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
74 #define XILINX_DMA_DMACR_RESET BIT(2)
75 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
76 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
77 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
78 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
79 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
80 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
81
82 #define XILINX_DMA_REG_DMASR 0x0004
83 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
84 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
85 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
86 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
87 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
88 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
89 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
90 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
91 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
92 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
93 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
94 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
95 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
96 #define XILINX_DMA_DMASR_IDLE BIT(1)
97 #define XILINX_DMA_DMASR_HALTED BIT(0)
98 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
99 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
100
101 #define XILINX_DMA_REG_CURDESC 0x0008
102 #define XILINX_DMA_REG_TAILDESC 0x0010
103 #define XILINX_DMA_REG_REG_INDEX 0x0014
104 #define XILINX_DMA_REG_FRMSTORE 0x0018
105 #define XILINX_DMA_REG_THRESHOLD 0x001c
106 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
107 #define XILINX_DMA_REG_PARK_PTR 0x0028
108 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
109 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
110 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
111 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
112 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
113
114 /* Register Direct Mode Registers */
115 #define XILINX_DMA_REG_VSIZE 0x0000
116 #define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
117 #define XILINX_DMA_REG_HSIZE 0x0004
118 #define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
119
120 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
121 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
122 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
123
124 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
125 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
126
127 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
128 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
129
130 /* HW specific definitions */
131 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
132 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
133 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
134
135 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
136 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
137 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
138 XILINX_DMA_DMASR_ERR_IRQ)
139
140 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
141 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
142 XILINX_DMA_DMASR_SOF_LATE_ERR | \
143 XILINX_DMA_DMASR_SG_DEC_ERR | \
144 XILINX_DMA_DMASR_SG_SLV_ERR | \
145 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
146 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
147 XILINX_DMA_DMASR_DMA_DEC_ERR | \
148 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
149 XILINX_DMA_DMASR_DMA_INT_ERR)
150
151 /*
152 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
153 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
154 * is enabled in the h/w system.
155 */
156 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
157 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
158 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
159 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
160 XILINX_DMA_DMASR_DMA_INT_ERR)
161
162 /* Axi VDMA Flush on Fsync bits */
163 #define XILINX_DMA_FLUSH_S2MM 3
164 #define XILINX_DMA_FLUSH_MM2S 2
165 #define XILINX_DMA_FLUSH_BOTH 1
166
167 /* Delay loop counter to prevent hardware failure */
168 #define XILINX_DMA_LOOP_COUNT 1000000
169
170 /* AXI DMA Specific Registers/Offsets */
171 #define XILINX_DMA_REG_SRCDSTADDR 0x18
172 #define XILINX_DMA_REG_BTT 0x28
173
174 /* AXI DMA Specific Masks/Bit fields */
175 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
176 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
177 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
178 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
179 #define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
180 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
181 #define XILINX_DMA_CR_COALESCE_SHIFT 16
182 #define XILINX_DMA_CR_DELAY_SHIFT 24
183 #define XILINX_DMA_BD_SOP BIT(27)
184 #define XILINX_DMA_BD_EOP BIT(26)
185 #define XILINX_DMA_BD_COMP_MASK BIT(31)
186 #define XILINX_DMA_COALESCE_MAX 255
187 #define XILINX_DMA_NUM_DESCS 512
188 #define XILINX_DMA_NUM_APP_WORDS 5
189
190 /* AXI CDMA Specific Registers/Offsets */
191 #define XILINX_CDMA_REG_SRCADDR 0x18
192 #define XILINX_CDMA_REG_DSTADDR 0x20
193
194 /* AXI CDMA Specific Masks */
195 #define XILINX_CDMA_CR_SGMODE BIT(3)
196
197 #define xilinx_prep_dma_addr_t(addr) \
198 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
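/*
 * Example expansion (as used in the simple-mode programming below):
 * xilinx_prep_dma_addr_t(hw->src_addr) token-pastes the "_msb" suffix onto
 * the last token and evaluates to
 * ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))),
 * i.e. it recombines the two 32-bit descriptor address halves.
 */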
199
200 /* AXI MCDMA Specific Registers/Offsets */
201 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
202 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
203 #define XILINX_MCDMA_CHEN_OFFSET 0x0008
204 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
205 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
206 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
207 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
208 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
209 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
210 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
211
212 /* AXI MCDMA Specific Masks/Shifts */
213 #define XILINX_MCDMA_COALESCE_SHIFT 16
214 #define XILINX_MCDMA_COALESCE_MAX 24
215 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
216 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
217 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
218 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
219 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
220 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
221 #define XILINX_MCDMA_BD_EOP BIT(30)
222 #define XILINX_MCDMA_BD_SOP BIT(31)
223
224 /**
225 * struct xilinx_vdma_desc_hw - Hardware Descriptor
226 * @next_desc: Next Descriptor Pointer @0x00
227 * @pad1: Reserved @0x04
228 * @buf_addr: Buffer address @0x08
229 * @buf_addr_msb: MSB of Buffer address @0x0C
230 * @vsize: Vertical Size @0x10
231 * @hsize: Horizontal Size @0x14
232 * @stride: Number of bytes between the first
233 * pixels of each horizontal line @0x18
234 */
235 struct xilinx_vdma_desc_hw {
236 u32 next_desc;
237 u32 pad1;
238 u32 buf_addr;
239 u32 buf_addr_msb;
240 u32 vsize;
241 u32 hsize;
242 u32 stride;
243 } __aligned(64);
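/*
 * Worked example (illustrative values only): a 1920x1080 frame of 32-bit
 * pixels would use hsize = 1920 * 4 = 7680 bytes per line, vsize = 1080
 * lines, and stride >= 7680 bytes, stride being the distance in memory
 * between the first pixels of consecutive lines.
 */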
244
245 /**
246 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
247 * @next_desc: Next Descriptor Pointer @0x00
248 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
249 * @buf_addr: Buffer address @0x08
250 * @buf_addr_msb: MSB of Buffer address @0x0C
251 * @reserved1: Reserved @0x10
252 * @reserved2: Reserved @0x14
253 * @control: Control field @0x18
254 * @status: Status field @0x1C
255 * @app: APP Fields @0x20 - 0x30
256 */
257 struct xilinx_axidma_desc_hw {
258 u32 next_desc;
259 u32 next_desc_msb;
260 u32 buf_addr;
261 u32 buf_addr_msb;
262 u32 reserved1;
263 u32 reserved2;
264 u32 control;
265 u32 status;
266 u32 app[XILINX_DMA_NUM_APP_WORDS];
267 } __aligned(64);
268
269 /**
270 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
271 * @next_desc: Next Descriptor Pointer @0x00
272 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
273 * @buf_addr: Buffer address @0x08
274 * @buf_addr_msb: MSB of Buffer address @0x0C
275 * @rsvd: Reserved field @0x10
276 * @control: Control Information field @0x14
277 * @status: Status field @0x18
278 * @sideband_status: Status of sideband signals @0x1C
279 * @app: APP Fields @0x20 - 0x30
280 */
281 struct xilinx_aximcdma_desc_hw {
282 u32 next_desc;
283 u32 next_desc_msb;
284 u32 buf_addr;
285 u32 buf_addr_msb;
286 u32 rsvd;
287 u32 control;
288 u32 status;
289 u32 sideband_status;
290 u32 app[XILINX_DMA_NUM_APP_WORDS];
291 } __aligned(64);
292
293 /**
294 * struct xilinx_cdma_desc_hw - Hardware Descriptor
295 * @next_desc: Next Descriptor Pointer @0x00
296 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
297 * @src_addr: Source address @0x08
298 * @src_addr_msb: Source address MSB @0x0C
299 * @dest_addr: Destination address @0x10
300 * @dest_addr_msb: Destination address MSB @0x14
301 * @control: Control field @0x18
302 * @status: Status field @0x1C
303 */
304 struct xilinx_cdma_desc_hw {
305 u32 next_desc;
306 u32 next_desc_msb;
307 u32 src_addr;
308 u32 src_addr_msb;
309 u32 dest_addr;
310 u32 dest_addr_msb;
311 u32 control;
312 u32 status;
313 } __aligned(64);
314
315 /**
316 * struct xilinx_vdma_tx_segment - Descriptor segment
317 * @hw: Hardware descriptor
318 * @node: Node in the descriptor segments list
319 * @phys: Physical address of segment
320 */
321 struct xilinx_vdma_tx_segment {
322 struct xilinx_vdma_desc_hw hw;
323 struct list_head node;
324 dma_addr_t phys;
325 } __aligned(64);
326
327 /**
328 * struct xilinx_axidma_tx_segment - Descriptor segment
329 * @hw: Hardware descriptor
330 * @node: Node in the descriptor segments list
331 * @phys: Physical address of segment
332 */
333 struct xilinx_axidma_tx_segment {
334 struct xilinx_axidma_desc_hw hw;
335 struct list_head node;
336 dma_addr_t phys;
337 } __aligned(64);
338
339 /**
340 * struct xilinx_aximcdma_tx_segment - Descriptor segment
341 * @hw: Hardware descriptor
342 * @node: Node in the descriptor segments list
343 * @phys: Physical address of segment
344 */
345 struct xilinx_aximcdma_tx_segment {
346 struct xilinx_aximcdma_desc_hw hw;
347 struct list_head node;
348 dma_addr_t phys;
349 } __aligned(64);
350
351 /**
352 * struct xilinx_cdma_tx_segment - Descriptor segment
353 * @hw: Hardware descriptor
354 * @node: Node in the descriptor segments list
355 * @phys: Physical address of segment
356 */
357 struct xilinx_cdma_tx_segment {
358 struct xilinx_cdma_desc_hw hw;
359 struct list_head node;
360 dma_addr_t phys;
361 } __aligned(64);
362
363 /**
364 * struct xilinx_dma_tx_descriptor - Per Transaction structure
365 * @async_tx: Async transaction descriptor
366 * @segments: TX segments list
367 * @node: Node in the channel descriptors list
368 * @cyclic: Check for cyclic transfers.
369 * @err: Whether the descriptor has an error.
370 * @residue: Residue of the completed descriptor
371 */
372 struct xilinx_dma_tx_descriptor {
373 struct dma_async_tx_descriptor async_tx;
374 struct list_head segments;
375 struct list_head node;
376 bool cyclic;
377 bool err;
378 u32 residue;
379 };
380
381 /**
382 * struct xilinx_dma_chan - Driver specific DMA channel structure
383 * @xdev: Driver specific device structure
384 * @ctrl_offset: Control registers offset
385 * @desc_offset: TX descriptor registers offset
386 * @lock: Descriptor operation lock
387 * @pending_list: Descriptors waiting
388 * @active_list: Descriptors ready to submit
389 * @done_list: Complete descriptors
390 * @free_seg_list: Free descriptors
391 * @common: DMA common channel
392 * @desc_pool: Descriptors pool
393 * @dev: The dma device
394 * @irq: Channel IRQ
395 * @id: Channel ID
396 * @direction: Transfer direction
397 * @num_frms: Number of frames
398 * @has_sg: Support scatter transfers
399 * @cyclic: Check for cyclic transfers.
400 * @genlock: Support genlock mode
401 * @err: Channel has errors
402 * @idle: Check for channel idle
403 * @terminating: Check for channel being synchronized by user
404 * @tasklet: Cleanup work after irq
405 * @config: Device configuration info
406 * @flush_on_fsync: Flush on Frame sync
407 * @desc_pendingcount: Descriptor pending count
408 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
409 * @desc_submitcount: Descriptor h/w submitted count
410 * @seg_v: Statically allocated segments base
411 * @seg_mv: Statically allocated segments base for MCDMA
412 * @seg_p: Physical allocated segments base
413 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
414 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
415 * @start_transfer: Per-DMA-IP handler that starts a transfer
416 * @stop_transfer: Per-DMA-IP handler that quiesces a transfer
417 * @tdest: TDEST value for mcdma
418 * @has_vflip: S2MM vertical flip
419 * @irq_delay: Interrupt delay timeout
420 */
421 struct xilinx_dma_chan {
422 struct xilinx_dma_device *xdev;
423 u32 ctrl_offset;
424 u32 desc_offset;
425 spinlock_t lock;
426 struct list_head pending_list;
427 struct list_head active_list;
428 struct list_head done_list;
429 struct list_head free_seg_list;
430 struct dma_chan common;
431 struct dma_pool *desc_pool;
432 struct device *dev;
433 int irq;
434 int id;
435 enum dma_transfer_direction direction;
436 int num_frms;
437 bool has_sg;
438 bool cyclic;
439 bool genlock;
440 bool err;
441 bool idle;
442 bool terminating;
443 struct tasklet_struct tasklet;
444 struct xilinx_vdma_config config;
445 bool flush_on_fsync;
446 u32 desc_pendingcount;
447 bool ext_addr;
448 u32 desc_submitcount;
449 struct xilinx_axidma_tx_segment *seg_v;
450 struct xilinx_aximcdma_tx_segment *seg_mv;
451 dma_addr_t seg_p;
452 struct xilinx_axidma_tx_segment *cyclic_seg_v;
453 dma_addr_t cyclic_seg_p;
454 void (*start_transfer)(struct xilinx_dma_chan *chan);
455 int (*stop_transfer)(struct xilinx_dma_chan *chan);
456 u16 tdest;
457 bool has_vflip;
458 u8 irq_delay;
459 };
460
461 /**
462 * enum xdma_ip_type - DMA IP type.
463 *
464 * @XDMA_TYPE_AXIDMA: Axi dma ip.
465 * @XDMA_TYPE_CDMA: Axi cdma ip.
466 * @XDMA_TYPE_VDMA: Axi vdma ip.
467 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
468 *
469 */
470 enum xdma_ip_type {
471 XDMA_TYPE_AXIDMA = 0,
472 XDMA_TYPE_CDMA,
473 XDMA_TYPE_VDMA,
474 XDMA_TYPE_AXIMCDMA
475 };
476
477 struct xilinx_dma_config {
478 enum xdma_ip_type dmatype;
479 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
480 struct clk **tx_clk, struct clk **txs_clk,
481 struct clk **rx_clk, struct clk **rxs_clk);
482 irqreturn_t (*irq_handler)(int irq, void *data);
483 const int max_channels;
484 };
485
486 /**
487 * struct xilinx_dma_device - DMA device structure
488 * @regs: I/O mapped base address
489 * @dev: Device Structure
490 * @common: DMA device structure
491 * @chan: Driver specific DMA channel
492 * @flush_on_fsync: Flush on frame sync
493 * @ext_addr: Indicates 64 bit addressing is supported by dma device
494 * @pdev: Platform device structure pointer
495 * @dma_config: DMA config structure
496 * @axi_clk: DMA Axi4-lite interface clock
497 * @tx_clk: DMA mm2s clock
498 * @txs_clk: DMA mm2s stream clock
499 * @rx_clk: DMA s2mm clock
500 * @rxs_clk: DMA s2mm stream clock
501 * @s2mm_chan_id: DMA s2mm channel identifier
502 * @mm2s_chan_id: DMA mm2s channel identifier
503 * @max_buffer_len: Max buffer length
504 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
505 */
506 struct xilinx_dma_device {
507 void __iomem *regs;
508 struct device *dev;
509 struct dma_device common;
510 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
511 u32 flush_on_fsync;
512 bool ext_addr;
513 struct platform_device *pdev;
514 const struct xilinx_dma_config *dma_config;
515 struct clk *axi_clk;
516 struct clk *tx_clk;
517 struct clk *txs_clk;
518 struct clk *rx_clk;
519 struct clk *rxs_clk;
520 u32 s2mm_chan_id;
521 u32 mm2s_chan_id;
522 u32 max_buffer_len;
523 bool has_axistream_connected;
524 };
525
526 /* Macros */
527 #define to_xilinx_chan(chan) \
528 container_of(chan, struct xilinx_dma_chan, common)
529 #define to_dma_tx_descriptor(tx) \
530 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
531 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
532 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
533 val, cond, delay_us, timeout_us)
534
535 /* IO accessors */
536 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
537 {
538 return ioread32(chan->xdev->regs + reg);
539 }
540
541 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
542 {
543 iowrite32(value, chan->xdev->regs + reg);
544 }
545
546 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
547 u32 value)
548 {
549 dma_write(chan, chan->desc_offset + reg, value);
550 }
551
552 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
553 {
554 return dma_read(chan, chan->ctrl_offset + reg);
555 }
556
557 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
558 u32 value)
559 {
560 dma_write(chan, chan->ctrl_offset + reg, value);
561 }
562
563 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
564 u32 clr)
565 {
566 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
567 }
568
569 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
570 u32 set)
571 {
572 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
573 }
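/*
 * These accessors layer on top of each other. For example,
 * dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP)
 * reads regs + ctrl_offset + 0x0000, ORs in BIT(0) and writes the result
 * back, i.e. a read-modify-write of the channel control register.
 */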
574
575 /**
576 * vdma_desc_write_64 - 64-bit descriptor write
577 * @chan: Driver specific VDMA channel
578 * @reg: Register to write
579 * @value_lsb: lower address of the descriptor.
580 * @value_msb: upper address of the descriptor.
581 *
582 * Since the vdma driver writes to a register offset that is not 64-bit
583 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
584 * instead of a single 64-bit register write.
585 */
586 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
587 u32 value_lsb, u32 value_msb)
588 {
589 /* Write the lsb 32 bits */
590 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
591
592 /* Write the msb 32 bits */
593 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
594 }
595
596 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
597 {
598 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
599 }
600
601 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
602 dma_addr_t addr)
603 {
604 if (chan->ext_addr)
605 dma_writeq(chan, reg, addr);
606 else
607 dma_ctrl_write(chan, reg, addr);
608 }
609
610 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
611 struct xilinx_axidma_desc_hw *hw,
612 dma_addr_t buf_addr, size_t sg_used,
613 size_t period_len)
614 {
615 if (chan->ext_addr) {
616 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
617 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
618 period_len);
619 } else {
620 hw->buf_addr = buf_addr + sg_used + period_len;
621 }
622 }
623
624 static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
625 struct xilinx_aximcdma_desc_hw *hw,
626 dma_addr_t buf_addr, size_t sg_used)
627 {
628 if (chan->ext_addr) {
629 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
630 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
631 } else {
632 hw->buf_addr = buf_addr + sg_used;
633 }
634 }
635
636 /**
637 * xilinx_dma_get_metadata_ptr- Populate metadata pointer and payload length
638 * @tx: async transaction descriptor
639 * @payload_len: metadata payload length
640 * @max_len: metadata max length
641 * Return: The app field pointer.
642 */
643 static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
644 size_t *payload_len, size_t *max_len)
645 {
646 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
647 struct xilinx_axidma_tx_segment *seg;
648
649 *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
650 seg = list_first_entry(&desc->segments,
651 struct xilinx_axidma_tx_segment, node);
652 return seg->hw.app;
653 }
654
655 static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
656 .get_ptr = xilinx_dma_get_metadata_ptr,
657 };
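/*
 * A dmaengine client reaches these app words through the core metadata
 * API (DESC_METADATA_ENGINE mode); a minimal sketch:
 *
 *	ptr = dmaengine_desc_get_metadata_ptr(tx, &payload_len, &max_len);
 *	// MM2S: write up to max_len bytes into ptr, then
 *	dmaengine_desc_set_metadata_len(tx, payload_len);
 *	// S2MM: read payload_len bytes from ptr once the transfer completes
 */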
658
659 /* -----------------------------------------------------------------------------
660 * Descriptors and segments alloc and free
661 */
662
663 /**
664 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
665 * @chan: Driver specific DMA channel
666 *
667 * Return: The allocated segment on success and NULL on failure.
668 */
669 static struct xilinx_vdma_tx_segment *
670 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
671 {
672 struct xilinx_vdma_tx_segment *segment;
673 dma_addr_t phys;
674
675 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
676 if (!segment)
677 return NULL;
678
679 segment->phys = phys;
680
681 return segment;
682 }
683
684 /**
685 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
686 * @chan: Driver specific DMA channel
687 *
688 * Return: The allocated segment on success and NULL on failure.
689 */
690 static struct xilinx_cdma_tx_segment *
691 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
692 {
693 struct xilinx_cdma_tx_segment *segment;
694 dma_addr_t phys;
695
696 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
697 if (!segment)
698 return NULL;
699
700 segment->phys = phys;
701
702 return segment;
703 }
704
705 /**
706 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
707 * @chan: Driver specific DMA channel
708 *
709 * Return: The allocated segment on success and NULL on failure.
710 */
711 static struct xilinx_axidma_tx_segment *
712 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
713 {
714 struct xilinx_axidma_tx_segment *segment = NULL;
715 unsigned long flags;
716
717 spin_lock_irqsave(&chan->lock, flags);
718 if (!list_empty(&chan->free_seg_list)) {
719 segment = list_first_entry(&chan->free_seg_list,
720 struct xilinx_axidma_tx_segment,
721 node);
722 list_del(&segment->node);
723 }
724 spin_unlock_irqrestore(&chan->lock, flags);
725
726 if (!segment)
727 dev_dbg(chan->dev, "Could not find free tx segment\n");
728
729 return segment;
730 }
731
732 /**
733 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
734 * @chan: Driver specific DMA channel
735 *
736 * Return: The allocated segment on success and NULL on failure.
737 */
738 static struct xilinx_aximcdma_tx_segment *
739 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
740 {
741 struct xilinx_aximcdma_tx_segment *segment = NULL;
742 unsigned long flags;
743
744 spin_lock_irqsave(&chan->lock, flags);
745 if (!list_empty(&chan->free_seg_list)) {
746 segment = list_first_entry(&chan->free_seg_list,
747 struct xilinx_aximcdma_tx_segment,
748 node);
749 list_del(&segment->node);
750 }
751 spin_unlock_irqrestore(&chan->lock, flags);
752
753 return segment;
754 }
755
756 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
757 {
758 u32 next_desc = hw->next_desc;
759 u32 next_desc_msb = hw->next_desc_msb;
760
761 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
762
763 hw->next_desc = next_desc;
764 hw->next_desc_msb = next_desc_msb;
765 }
766
767 static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
768 {
769 u32 next_desc = hw->next_desc;
770 u32 next_desc_msb = hw->next_desc_msb;
771
772 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
773
774 hw->next_desc = next_desc;
775 hw->next_desc_msb = next_desc_msb;
776 }
777
778 /**
779 * xilinx_dma_free_tx_segment - Free transaction segment
780 * @chan: Driver specific DMA channel
781 * @segment: DMA transaction segment
782 */
783 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
784 struct xilinx_axidma_tx_segment *segment)
785 {
786 xilinx_dma_clean_hw_desc(&segment->hw);
787
788 list_add_tail(&segment->node, &chan->free_seg_list);
789 }
790
791 /**
792 * xilinx_mcdma_free_tx_segment - Free transaction segment
793 * @chan: Driver specific DMA channel
794 * @segment: DMA transaction segment
795 */
796 static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
797 struct xilinx_aximcdma_tx_segment *
798 segment)
799 {
800 xilinx_mcdma_clean_hw_desc(&segment->hw);
801
802 list_add_tail(&segment->node, &chan->free_seg_list);
803 }
804
805 /**
806 * xilinx_cdma_free_tx_segment - Free transaction segment
807 * @chan: Driver specific DMA channel
808 * @segment: DMA transaction segment
809 */
810 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
811 struct xilinx_cdma_tx_segment *segment)
812 {
813 dma_pool_free(chan->desc_pool, segment, segment->phys);
814 }
815
816 /**
817 * xilinx_vdma_free_tx_segment - Free transaction segment
818 * @chan: Driver specific DMA channel
819 * @segment: DMA transaction segment
820 */
821 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
822 struct xilinx_vdma_tx_segment *segment)
823 {
824 dma_pool_free(chan->desc_pool, segment, segment->phys);
825 }
826
827 /**
828 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
829 * @chan: Driver specific DMA channel
830 *
831 * Return: The allocated descriptor on success and NULL on failure.
832 */
833 static struct xilinx_dma_tx_descriptor *
834 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
835 {
836 struct xilinx_dma_tx_descriptor *desc;
837
838 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
839 if (!desc)
840 return NULL;
841
842 INIT_LIST_HEAD(&desc->segments);
843
844 return desc;
845 }
846
847 /**
848 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
849 * @chan: Driver specific DMA channel
850 * @desc: DMA transaction descriptor
851 */
852 static void
853 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
854 struct xilinx_dma_tx_descriptor *desc)
855 {
856 struct xilinx_vdma_tx_segment *segment, *next;
857 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
858 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
859 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
860
861 if (!desc)
862 return;
863
864 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
865 list_for_each_entry_safe(segment, next, &desc->segments, node) {
866 list_del(&segment->node);
867 xilinx_vdma_free_tx_segment(chan, segment);
868 }
869 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
870 list_for_each_entry_safe(cdma_segment, cdma_next,
871 &desc->segments, node) {
872 list_del(&cdma_segment->node);
873 xilinx_cdma_free_tx_segment(chan, cdma_segment);
874 }
875 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
876 list_for_each_entry_safe(axidma_segment, axidma_next,
877 &desc->segments, node) {
878 list_del(&axidma_segment->node);
879 xilinx_dma_free_tx_segment(chan, axidma_segment);
880 }
881 } else {
882 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
883 &desc->segments, node) {
884 list_del(&aximcdma_segment->node);
885 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
886 }
887 }
888
889 kfree(desc);
890 }
891
892 /* Required functions */
893
894 /**
895 * xilinx_dma_free_desc_list - Free descriptors list
896 * @chan: Driver specific DMA channel
897 * @list: List to parse and delete the descriptor
898 */
899 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
900 struct list_head *list)
901 {
902 struct xilinx_dma_tx_descriptor *desc, *next;
903
904 list_for_each_entry_safe(desc, next, list, node) {
905 list_del(&desc->node);
906 xilinx_dma_free_tx_descriptor(chan, desc);
907 }
908 }
909
910 /**
911 * xilinx_dma_free_descriptors - Free channel descriptors
912 * @chan: Driver specific DMA channel
913 */
914 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
915 {
916 unsigned long flags;
917
918 spin_lock_irqsave(&chan->lock, flags);
919
920 xilinx_dma_free_desc_list(chan, &chan->pending_list);
921 xilinx_dma_free_desc_list(chan, &chan->done_list);
922 xilinx_dma_free_desc_list(chan, &chan->active_list);
923
924 spin_unlock_irqrestore(&chan->lock, flags);
925 }
926
927 /**
928 * xilinx_dma_free_chan_resources - Free channel resources
929 * @dchan: DMA channel
930 */
931 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
932 {
933 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
934 unsigned long flags;
935
936 dev_dbg(chan->dev, "Free all channel resources.\n");
937
938 xilinx_dma_free_descriptors(chan);
939
940 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
941 spin_lock_irqsave(&chan->lock, flags);
942 INIT_LIST_HEAD(&chan->free_seg_list);
943 spin_unlock_irqrestore(&chan->lock, flags);
944
945 /* Free memory that is allocated for BD */
946 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
947 XILINX_DMA_NUM_DESCS, chan->seg_v,
948 chan->seg_p);
949
950 /* Free Memory that is allocated for cyclic DMA Mode */
951 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
952 chan->cyclic_seg_v, chan->cyclic_seg_p);
953 }
954
955 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
956 spin_lock_irqsave(&chan->lock, flags);
957 INIT_LIST_HEAD(&chan->free_seg_list);
958 spin_unlock_irqrestore(&chan->lock, flags);
959
960 /* Free memory that is allocated for BD */
961 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
962 XILINX_DMA_NUM_DESCS, chan->seg_mv,
963 chan->seg_p);
964 }
965
966 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
967 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
968 dma_pool_destroy(chan->desc_pool);
969 chan->desc_pool = NULL;
970 }
971
972 }
973
974 /**
975 * xilinx_dma_get_residue - Compute residue for a given descriptor
976 * @chan: Driver specific dma channel
977 * @desc: dma transaction descriptor
978 *
979 * Return: The number of residue bytes for the descriptor.
980 */
981 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
982 struct xilinx_dma_tx_descriptor *desc)
983 {
984 struct xilinx_cdma_tx_segment *cdma_seg;
985 struct xilinx_axidma_tx_segment *axidma_seg;
986 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
987 struct xilinx_cdma_desc_hw *cdma_hw;
988 struct xilinx_axidma_desc_hw *axidma_hw;
989 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
990 struct list_head *entry;
991 u32 residue = 0;
992
993 list_for_each(entry, &desc->segments) {
994 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
995 cdma_seg = list_entry(entry,
996 struct xilinx_cdma_tx_segment,
997 node);
998 cdma_hw = &cdma_seg->hw;
999 residue += (cdma_hw->control - cdma_hw->status) &
1000 chan->xdev->max_buffer_len;
1001 } else if (chan->xdev->dma_config->dmatype ==
1002 XDMA_TYPE_AXIDMA) {
1003 axidma_seg = list_entry(entry,
1004 struct xilinx_axidma_tx_segment,
1005 node);
1006 axidma_hw = &axidma_seg->hw;
1007 residue += (axidma_hw->control - axidma_hw->status) &
1008 chan->xdev->max_buffer_len;
1009 } else {
1010 aximcdma_seg =
1011 list_entry(entry,
1012 struct xilinx_aximcdma_tx_segment,
1013 node);
1014 aximcdma_hw = &aximcdma_seg->hw;
1015 residue +=
1016 (aximcdma_hw->control - aximcdma_hw->status) &
1017 chan->xdev->max_buffer_len;
1018 }
1019 }
1020
1021 return residue;
1022 }
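/*
 * For example, a segment programmed for 4096 bytes (low bits of control)
 * of which the hardware reports 1024 transferred (low bits of status)
 * contributes 4096 - 1024 = 3072 bytes to the residue.
 */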
1023
1024 /**
1025 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
1026 * @chan: Driver specific dma channel
1027 * @desc: dma transaction descriptor
1028 * @flags: flags for spin lock
1029 */
1030 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
1031 struct xilinx_dma_tx_descriptor *desc,
1032 unsigned long *flags)
1033 {
1034 struct dmaengine_desc_callback cb;
1035
1036 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1037 if (dmaengine_desc_callback_valid(&cb)) {
1038 spin_unlock_irqrestore(&chan->lock, *flags);
1039 dmaengine_desc_callback_invoke(&cb, NULL);
1040 spin_lock_irqsave(&chan->lock, *flags);
1041 }
1042 }
1043
1044 /**
1045 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1046 * @chan: Driver specific DMA channel
1047 */
1048 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1049 {
1050 struct xilinx_dma_tx_descriptor *desc, *next;
1051 unsigned long flags;
1052
1053 spin_lock_irqsave(&chan->lock, flags);
1054
1055 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1056 struct dmaengine_result result;
1057
1058 if (desc->cyclic) {
1059 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1060 break;
1061 }
1062
1063 /* Remove from the list of running transactions */
1064 list_del(&desc->node);
1065
1066 if (unlikely(desc->err)) {
1067 if (chan->direction == DMA_DEV_TO_MEM)
1068 result.result = DMA_TRANS_READ_FAILED;
1069 else
1070 result.result = DMA_TRANS_WRITE_FAILED;
1071 } else {
1072 result.result = DMA_TRANS_NOERROR;
1073 }
1074
1075 result.residue = desc->residue;
1076
1077 /* Run the link descriptor callback function */
1078 spin_unlock_irqrestore(&chan->lock, flags);
1079 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1080 spin_lock_irqsave(&chan->lock, flags);
1081
1082 /* Run any dependencies, then free the descriptor */
1083 dma_run_dependencies(&desc->async_tx);
1084 xilinx_dma_free_tx_descriptor(chan, desc);
1085
1086 /*
1087 * While we ran a callback, the user may have called a terminate
1088 * function, which takes care of cleaning up any remaining descriptors.
1089 */
1090 if (chan->terminating)
1091 break;
1092 }
1093
1094 spin_unlock_irqrestore(&chan->lock, flags);
1095 }
1096
1097 /**
1098 * xilinx_dma_do_tasklet - Schedule completion tasklet
1099 * @t: Pointer to the Xilinx DMA channel structure
1100 */
1101 static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1102 {
1103 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1104
1105 xilinx_dma_chan_desc_cleanup(chan);
1106 }
1107
1108 /**
1109 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1110 * @dchan: DMA channel
1111 *
1112 * Return: '0' on success and failure value on error
1113 */
1114 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1115 {
1116 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1117 int i;
1118
1119 /* Has this channel already been allocated? */
1120 if (chan->desc_pool)
1121 return 0;
1122
1123 /*
1124 * We need the descriptor to be aligned to 64 bytes
1125 * to meet the Xilinx VDMA specification requirement.
1126 */
1127 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1128 /* Allocate the buffer descriptors. */
1129 chan->seg_v = dma_alloc_coherent(chan->dev,
1130 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1131 &chan->seg_p, GFP_KERNEL);
1132 if (!chan->seg_v) {
1133 dev_err(chan->dev,
1134 "unable to allocate channel %d descriptors\n",
1135 chan->id);
1136 return -ENOMEM;
1137 }
1138 /*
1139 * For cyclic DMA mode we need to program the tail descriptor
1140 * register with a value which is not a part of the BD chain,
1141 * so allocate a descriptor segment during channel allocation
1142 * for programming the tail descriptor.
1143 */
1144 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1145 sizeof(*chan->cyclic_seg_v),
1146 &chan->cyclic_seg_p,
1147 GFP_KERNEL);
1148 if (!chan->cyclic_seg_v) {
1149 dev_err(chan->dev,
1150 "unable to allocate desc segment for cyclic DMA\n");
1151 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1152 XILINX_DMA_NUM_DESCS, chan->seg_v,
1153 chan->seg_p);
1154 return -ENOMEM;
1155 }
1156 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1157
1158 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1159 chan->seg_v[i].hw.next_desc =
1160 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1161 ((i + 1) % XILINX_DMA_NUM_DESCS));
1162 chan->seg_v[i].hw.next_desc_msb =
1163 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1164 ((i + 1) % XILINX_DMA_NUM_DESCS));
1165 chan->seg_v[i].phys = chan->seg_p +
1166 sizeof(*chan->seg_v) * i;
1167 list_add_tail(&chan->seg_v[i].node,
1168 &chan->free_seg_list);
1169 }
1170 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1171 /* Allocate the buffer descriptors. */
1172 chan->seg_mv = dma_alloc_coherent(chan->dev,
1173 sizeof(*chan->seg_mv) *
1174 XILINX_DMA_NUM_DESCS,
1175 &chan->seg_p, GFP_KERNEL);
1176 if (!chan->seg_mv) {
1177 dev_err(chan->dev,
1178 "unable to allocate channel %d descriptors\n",
1179 chan->id);
1180 return -ENOMEM;
1181 }
1182 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1183 chan->seg_mv[i].hw.next_desc =
1184 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1185 ((i + 1) % XILINX_DMA_NUM_DESCS));
1186 chan->seg_mv[i].hw.next_desc_msb =
1187 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1188 ((i + 1) % XILINX_DMA_NUM_DESCS));
1189 chan->seg_mv[i].phys = chan->seg_p +
1190 sizeof(*chan->seg_mv) * i;
1191 list_add_tail(&chan->seg_mv[i].node,
1192 &chan->free_seg_list);
1193 }
1194 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1195 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1196 chan->dev,
1197 sizeof(struct xilinx_cdma_tx_segment),
1198 __alignof__(struct xilinx_cdma_tx_segment),
1199 0);
1200 } else {
1201 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1202 chan->dev,
1203 sizeof(struct xilinx_vdma_tx_segment),
1204 __alignof__(struct xilinx_vdma_tx_segment),
1205 0);
1206 }
1207
1208 if (!chan->desc_pool &&
1209 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1210 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1211 dev_err(chan->dev,
1212 "unable to allocate channel %d descriptor pool\n",
1213 chan->id);
1214 return -ENOMEM;
1215 }
1216
1217 dma_cookie_init(dchan);
1218
1219 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1220 /* For AXI DMA, resetting one channel will reset the
1221 * other channel as well, so enable the interrupts here.
1222 */
1223 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1224 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1225 }
1226
1227 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1228 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1229 XILINX_CDMA_CR_SGMODE);
1230
1231 return 0;
1232 }
1233
1234 /**
1235 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1236 * @chan: Driver specific DMA channel
1237 * @size: Total data that needs to be copied
1238 * @done: Amount of data that has been already copied
1239 *
1240 * Return: Amount of data that has to be copied
1241 */
1242 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1243 int size, int done)
1244 {
1245 size_t copy;
1246
1247 copy = min_t(size_t, size - done,
1248 chan->xdev->max_buffer_len);
1249
1250 if ((copy + done < size) &&
1251 chan->xdev->common.copy_align) {
1252 /*
1253 * If this is not the last descriptor, make sure
1254 * the next one will be properly aligned
1255 */
1256 copy = rounddown(copy,
1257 (1 << chan->xdev->common.copy_align));
1258 }
1259 return copy;
1260 }
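/*
 * Worked example (illustrative values): with max_buffer_len = 0xFFFF and
 * copy_align = 6 (64-byte alignment), a request with size = 70000 and
 * done = 0 gives copy = min(70000, 65535) = 65535; since more data
 * follows, it is rounded down to 65472 so the next descriptor stays
 * 64-byte aligned.
 */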
1261
1262 /**
1263 * xilinx_dma_tx_status - Get DMA transaction status
1264 * @dchan: DMA channel
1265 * @cookie: Transaction identifier
1266 * @txstate: Transaction state
1267 *
1268 * Return: DMA transaction status
1269 */
1270 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1271 dma_cookie_t cookie,
1272 struct dma_tx_state *txstate)
1273 {
1274 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1275 struct xilinx_dma_tx_descriptor *desc;
1276 enum dma_status ret;
1277 unsigned long flags;
1278 u32 residue = 0;
1279
1280 ret = dma_cookie_status(dchan, cookie, txstate);
1281 if (ret == DMA_COMPLETE || !txstate)
1282 return ret;
1283
1284 spin_lock_irqsave(&chan->lock, flags);
1285 if (!list_empty(&chan->active_list)) {
1286 desc = list_last_entry(&chan->active_list,
1287 struct xilinx_dma_tx_descriptor, node);
1288 /*
1289 * VDMA and simple mode do not support residue reporting, so the
1290 * residue field will always be 0.
1291 */
1292 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1293 residue = xilinx_dma_get_residue(chan, desc);
1294 }
1295 spin_unlock_irqrestore(&chan->lock, flags);
1296
1297 dma_set_residue(txstate, residue);
1298
1299 return ret;
1300 }
1301
1302 /**
1303 * xilinx_dma_stop_transfer - Halt DMA channel
1304 * @chan: Driver specific DMA channel
1305 *
1306 * Return: '0' on success and failure value on error
1307 */
1308 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1309 {
1310 u32 val;
1311
1312 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1313
1314 /* Wait for the hardware to halt */
1315 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1316 val & XILINX_DMA_DMASR_HALTED, 0,
1317 XILINX_DMA_LOOP_COUNT);
1318 }
1319
1320 /**
1321 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1322 * @chan: Driver specific DMA channel
1323 *
1324 * Return: '0' on success and failure value on error
1325 */
1326 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1327 {
1328 u32 val;
1329
1330 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1331 val & XILINX_DMA_DMASR_IDLE, 0,
1332 XILINX_DMA_LOOP_COUNT);
1333 }
1334
1335 /**
1336 * xilinx_dma_start - Start DMA channel
1337 * @chan: Driver specific DMA channel
1338 */
1339 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1340 {
1341 int err;
1342 u32 val;
1343
1344 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1345
1346 /* Wait for the hardware to start */
1347 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1348 !(val & XILINX_DMA_DMASR_HALTED), 0,
1349 XILINX_DMA_LOOP_COUNT);
1350
1351 if (err) {
1352 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1353 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1354
1355 chan->err = true;
1356 }
1357 }
1358
1359 /**
1360 * xilinx_vdma_start_transfer - Starts VDMA transfer
1361 * @chan: Driver specific channel struct pointer
1362 */
1363 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1364 {
1365 struct xilinx_vdma_config *config = &chan->config;
1366 struct xilinx_dma_tx_descriptor *desc;
1367 u32 reg, j;
1368 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1369 int i = 0;
1370
1371 /* This function was invoked with lock held */
1372 if (chan->err)
1373 return;
1374
1375 if (!chan->idle)
1376 return;
1377
1378 if (list_empty(&chan->pending_list))
1379 return;
1380
1381 desc = list_first_entry(&chan->pending_list,
1382 struct xilinx_dma_tx_descriptor, node);
1383
1384 /* Configure the hardware using info in the config structure */
1385 if (chan->has_vflip) {
1386 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1387 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1388 reg |= config->vflip_en;
1389 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1390 reg);
1391 }
1392
1393 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1394
1395 if (config->frm_cnt_en)
1396 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1397 else
1398 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1399
1400 /* If not parking, enable circular mode */
1401 if (config->park)
1402 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1403 else
1404 reg |= XILINX_DMA_DMACR_CIRC_EN;
1405
1406 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1407
1408 if (config->park) {
1409 j = chan->desc_submitcount;
1410 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1411 if (chan->direction == DMA_MEM_TO_DEV) {
1412 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1413 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1414 } else {
1415 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1416 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1417 }
1418 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1419 }
1420
1421 /* Start the hardware */
1422 xilinx_dma_start(chan);
1423
1424 if (chan->err)
1425 return;
1426
1427 /* Start the transfer */
1428 if (chan->desc_submitcount < chan->num_frms)
1429 i = chan->desc_submitcount;
1430
1431 list_for_each_entry(segment, &desc->segments, node) {
1432 if (chan->ext_addr)
1433 vdma_desc_write_64(chan,
1434 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1435 segment->hw.buf_addr,
1436 segment->hw.buf_addr_msb);
1437 else
1438 vdma_desc_write(chan,
1439 XILINX_VDMA_REG_START_ADDRESS(i++),
1440 segment->hw.buf_addr);
1441
1442 last = segment;
1443 }
1444
1445 if (!last)
1446 return;
1447
1448 /* HW expects these parameters to be same for one transaction */
1449 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1450 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1451 last->hw.stride);
1452 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1453
1454 chan->desc_submitcount++;
1455 chan->desc_pendingcount--;
1456 list_move_tail(&desc->node, &chan->active_list);
1457 if (chan->desc_submitcount == chan->num_frms)
1458 chan->desc_submitcount = 0;
1459
1460 chan->idle = false;
1461 }
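/*
 * The park/circular and vertical flip decisions above come from the
 * channel config that a client sets beforehand; a sketch (field names per
 * <linux/dma/xilinx_dma.h>):
 *
 *	struct xilinx_vdma_config cfg = { .park = 1, .vflip_en = 0 };
 *	xilinx_vdma_channel_set_config(dchan, &cfg);
 */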
1462
1463 /**
1464 * xilinx_cdma_start_transfer - Starts cdma transfer
1465 * @chan: Driver specific channel struct pointer
1466 */
1467 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1468 {
1469 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1470 struct xilinx_cdma_tx_segment *tail_segment;
1471 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1472
1473 if (chan->err)
1474 return;
1475
1476 if (!chan->idle)
1477 return;
1478
1479 if (list_empty(&chan->pending_list))
1480 return;
1481
1482 head_desc = list_first_entry(&chan->pending_list,
1483 struct xilinx_dma_tx_descriptor, node);
1484 tail_desc = list_last_entry(&chan->pending_list,
1485 struct xilinx_dma_tx_descriptor, node);
1486 tail_segment = list_last_entry(&tail_desc->segments,
1487 struct xilinx_cdma_tx_segment, node);
1488
1489 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1490 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1491 ctrl_reg |= chan->desc_pendingcount <<
1492 XILINX_DMA_CR_COALESCE_SHIFT;
1493 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1494 }
1495
1496 if (chan->has_sg) {
1497 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1498 XILINX_CDMA_CR_SGMODE);
1499
1500 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1501 XILINX_CDMA_CR_SGMODE);
1502
1503 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1504 head_desc->async_tx.phys);
1505
1506 /* Update tail ptr register which will start the transfer */
1507 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1508 tail_segment->phys);
1509 } else {
1510 /* In simple mode */
1511 struct xilinx_cdma_tx_segment *segment;
1512 struct xilinx_cdma_desc_hw *hw;
1513
1514 segment = list_first_entry(&head_desc->segments,
1515 struct xilinx_cdma_tx_segment,
1516 node);
1517
1518 hw = &segment->hw;
1519
1520 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1521 xilinx_prep_dma_addr_t(hw->src_addr));
1522 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1523 xilinx_prep_dma_addr_t(hw->dest_addr));
1524
1525 /* Start the transfer */
1526 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1527 hw->control & chan->xdev->max_buffer_len);
1528 }
1529
1530 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1531 chan->desc_pendingcount = 0;
1532 chan->idle = false;
1533 }
1534
1535 /**
1536 * xilinx_dma_start_transfer - Starts DMA transfer
1537 * @chan: Driver specific channel struct pointer
1538 */
1539 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1540 {
1541 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1542 struct xilinx_axidma_tx_segment *tail_segment;
1543 u32 reg;
1544
1545 if (chan->err)
1546 return;
1547
1548 if (list_empty(&chan->pending_list))
1549 return;
1550
1551 if (!chan->idle)
1552 return;
1553
1554 head_desc = list_first_entry(&chan->pending_list,
1555 struct xilinx_dma_tx_descriptor, node);
1556 tail_desc = list_last_entry(&chan->pending_list,
1557 struct xilinx_dma_tx_descriptor, node);
1558 tail_segment = list_last_entry(&tail_desc->segments,
1559 struct xilinx_axidma_tx_segment, node);
1560
1561 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1562
1563 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1564 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1565 reg |= chan->desc_pendingcount <<
1566 XILINX_DMA_CR_COALESCE_SHIFT;
1567 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1568 }
1569
1570 if (chan->has_sg)
1571 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1572 head_desc->async_tx.phys);
1573 reg &= ~XILINX_DMA_CR_DELAY_MAX;
1574 reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
1575 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1576
1577 xilinx_dma_start(chan);
1578
1579 if (chan->err)
1580 return;
1581
1582 /* Start the transfer */
1583 if (chan->has_sg) {
1584 if (chan->cyclic)
1585 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1586 chan->cyclic_seg_v->phys);
1587 else
1588 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1589 tail_segment->phys);
1590 } else {
1591 struct xilinx_axidma_tx_segment *segment;
1592 struct xilinx_axidma_desc_hw *hw;
1593
1594 segment = list_first_entry(&head_desc->segments,
1595 struct xilinx_axidma_tx_segment,
1596 node);
1597 hw = &segment->hw;
1598
1599 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1600 xilinx_prep_dma_addr_t(hw->buf_addr));
1601
1602 /* Start the transfer */
1603 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1604 hw->control & chan->xdev->max_buffer_len);
1605 }
1606
1607 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1608 chan->desc_pendingcount = 0;
1609 chan->idle = false;
1610 }
1611
1612 /**
1613 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1614 * @chan: Driver specific channel struct pointer
1615 */
1616 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1617 {
1618 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1619 struct xilinx_aximcdma_tx_segment *tail_segment;
1620 u32 reg;
1621
1622 /*
1623 * The lock has already been taken by the calling functions, so we
1624 * don't need to take it here again.
1625 */
1626
1627 if (chan->err)
1628 return;
1629
1630 if (!chan->idle)
1631 return;
1632
1633 if (list_empty(&chan->pending_list))
1634 return;
1635
1636 head_desc = list_first_entry(&chan->pending_list,
1637 struct xilinx_dma_tx_descriptor, node);
1638 tail_desc = list_last_entry(&chan->pending_list,
1639 struct xilinx_dma_tx_descriptor, node);
1640 tail_segment = list_last_entry(&tail_desc->segments,
1641 struct xilinx_aximcdma_tx_segment, node);
1642
1643 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1644
1645 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1646 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1647 reg |= chan->desc_pendingcount <<
1648 XILINX_MCDMA_COALESCE_SHIFT;
1649 }
1650
1651 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1652 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1653
1654 /* Program current descriptor */
1655 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1656 head_desc->async_tx.phys);
1657
1658 /* Program channel enable register */
1659 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1660 reg |= BIT(chan->tdest);
1661 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1662
1663 /* Start the fetch of BDs for the channel */
1664 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1665 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1666 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1667
1668 xilinx_dma_start(chan);
1669
1670 if (chan->err)
1671 return;
1672
1673 /* Start the transfer */
1674 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1675 tail_segment->phys);
1676
1677 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1678 chan->desc_pendingcount = 0;
1679 chan->idle = false;
1680 }
1681
1682 /**
1683 * xilinx_dma_issue_pending - Issue pending transactions
1684 * @dchan: DMA channel
1685 */
1686 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1687 {
1688 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1689 unsigned long flags;
1690
1691 spin_lock_irqsave(&chan->lock, flags);
1692 chan->start_transfer(chan);
1693 spin_unlock_irqrestore(&chan->lock, flags);
1694 }
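
/*
 * Illustrative client-side sketch (not part of this driver): the usual
 * dmaengine flow that ends up in xilinx_dma_issue_pending() through the
 * device_issue_pending hook. The helper name my_dev_start_rx() is
 * hypothetical and error handling is trimmed. Assumes <linux/dmaengine.h>.
 */
static __maybe_unused int my_dev_start_rx(struct dma_chan *chan,
					  dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* The channel would have been obtained earlier, e.g. with
	 * dma_request_chan(dev, "rx").
	 */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* -> xilinx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* -> xilinx_dma_issue_pending() */
	return 0;
}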
1695
1696 /**
1697 * xilinx_dma_device_config - Configure the DMA channel
1698 * @dchan: DMA channel
1699 * @config: channel configuration
1700 *
1701 * Return: 0 always.
1702 */
1703 static int xilinx_dma_device_config(struct dma_chan *dchan,
1704 struct dma_slave_config *config)
1705 {
1706 return 0;
1707 }
1708
1709 /**
1710 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1711 * @chan: xilinx DMA channel
1712 *
1713 * CONTEXT: hardirq
1714 */
1715 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1716 {
1717 struct xilinx_dma_tx_descriptor *desc, *next;
1718
1719 /* This function was invoked with lock held */
1720 if (list_empty(&chan->active_list))
1721 return;
1722
1723 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1724 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1725 struct xilinx_axidma_tx_segment *seg;
1726
1727 seg = list_last_entry(&desc->segments,
1728 struct xilinx_axidma_tx_segment, node);
1729 if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
1730 break;
1731 }
1732 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1733 XDMA_TYPE_VDMA)
1734 desc->residue = xilinx_dma_get_residue(chan, desc);
1735 else
1736 desc->residue = 0;
1737 desc->err = chan->err;
1738
1739 list_del(&desc->node);
1740 if (!desc->cyclic)
1741 dma_cookie_complete(&desc->async_tx);
1742 list_add_tail(&desc->node, &chan->done_list);
1743 }
1744 }
1745
1746 /**
1747 * xilinx_dma_reset - Reset DMA channel
1748 * @chan: Driver specific DMA channel
1749 *
1750 * Return: '0' on success and failure value on error
1751 */
1752 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1753 {
1754 int err;
1755 u32 tmp;
1756
1757 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1758
1759 /* Wait for the hardware to finish reset */
1760 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1761 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1762 XILINX_DMA_LOOP_COUNT);
1763
1764 if (err) {
1765 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1766 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1767 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1768 return -ETIMEDOUT;
1769 }
1770
1771 chan->err = false;
1772 chan->idle = true;
1773 chan->desc_pendingcount = 0;
1774 chan->desc_submitcount = 0;
1775
1776 return err;
1777 }
1778
1779 /**
1780 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1781 * @chan: Driver specific DMA channel
1782 *
1783 * Return: '0' on success and failure value on error
1784 */
1785 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1786 {
1787 int err;
1788
1789 /* Reset VDMA */
1790 err = xilinx_dma_reset(chan);
1791 if (err)
1792 return err;
1793
1794 /* Enable interrupts */
1795 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1796 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1797
1798 return 0;
1799 }
1800
1801 /**
1802 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1803 * @irq: IRQ number
1804 * @data: Pointer to the Xilinx MCDMA channel structure
1805 *
1806 * Return: IRQ_HANDLED/IRQ_NONE
1807 */
1808 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1809 {
1810 struct xilinx_dma_chan *chan = data;
1811 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1812
1813 if (chan->direction == DMA_DEV_TO_MEM)
1814 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1815 else
1816 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1817
1818 /* Read the channel id that raised the interrupt */
1819 chan_sermask = dma_ctrl_read(chan, ser_offset);
1820 chan_id = ffs(chan_sermask);
1821
1822 if (!chan_id)
1823 return IRQ_NONE;
1824
1825 if (chan->direction == DMA_DEV_TO_MEM)
1826 chan_offset = chan->xdev->dma_config->max_channels / 2;
1827
1828 chan_offset = chan_offset + (chan_id - 1);
1829 chan = chan->xdev->chan[chan_offset];
1830 /* Read the status and ack the interrupts. */
1831 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1832 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1833 return IRQ_NONE;
1834
1835 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1836 status & XILINX_MCDMA_IRQ_ALL_MASK);
1837
1838 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1839 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1840 chan,
1841 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1842 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1843 (chan->tdest)),
1844 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1845 (chan->tdest)));
1846 chan->err = true;
1847 }
1848
1849 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1850 /*
1851 * The device is taking too long to do the transfer when the user
1852 * requires responsiveness.
1853 */
1854 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1855 }
1856
1857 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1858 spin_lock(&chan->lock);
1859 xilinx_dma_complete_descriptor(chan);
1860 chan->idle = true;
1861 chan->start_transfer(chan);
1862 spin_unlock(&chan->lock);
1863 }
1864
1865 tasklet_hi_schedule(&chan->tasklet);
1866 return IRQ_HANDLED;
1867 }
1868
1869 /**
1870 * xilinx_dma_irq_handler - DMA Interrupt handler
1871 * @irq: IRQ number
1872 * @data: Pointer to the Xilinx DMA channel structure
1873 *
1874 * Return: IRQ_HANDLED/IRQ_NONE
1875 */
1876 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1877 {
1878 struct xilinx_dma_chan *chan = data;
1879 u32 status;
1880
1881 /* Read the status and ack the interrupts. */
1882 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1883 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1884 return IRQ_NONE;
1885
1886 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1887 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1888
1889 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1890 /*
1891 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1892 * error is recoverable, ignore it. Otherwise flag the error.
1893 *
1894 * Only recoverable errors can be cleared in the DMASR register,
1895 * so make sure not to write the other error bits to 1.
1896 */
1897 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1898
1899 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1900 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1901
1902 if (!chan->flush_on_fsync ||
1903 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1904 dev_err(chan->dev,
1905 "Channel %p has errors %x, cdr %x tdr %x\n",
1906 chan, errors,
1907 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1908 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1909 chan->err = true;
1910 }
1911 }
1912
1913 if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
1914 XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
1915 spin_lock(&chan->lock);
1916 xilinx_dma_complete_descriptor(chan);
1917 chan->idle = true;
1918 chan->start_transfer(chan);
1919 spin_unlock(&chan->lock);
1920 }
1921
1922 tasklet_schedule(&chan->tasklet);
1923 return IRQ_HANDLED;
1924 }
1925
1926 /**
1927 * append_desc_queue - Queue a descriptor on the pending list
1928 * @chan: Driver specific dma channel
1929 * @desc: dma transaction descriptor
1930 */
1931 static void append_desc_queue(struct xilinx_dma_chan *chan,
1932 struct xilinx_dma_tx_descriptor *desc)
1933 {
1934 struct xilinx_vdma_tx_segment *tail_segment;
1935 struct xilinx_dma_tx_descriptor *tail_desc;
1936 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1937 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1938 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1939
1940 if (list_empty(&chan->pending_list))
1941 goto append;
1942
1943 /*
1944 * Add the hardware descriptor to the chain of hardware descriptors
1945 * that already exists in memory.
1946 */
1947 tail_desc = list_last_entry(&chan->pending_list,
1948 struct xilinx_dma_tx_descriptor, node);
1949 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1950 tail_segment = list_last_entry(&tail_desc->segments,
1951 struct xilinx_vdma_tx_segment,
1952 node);
1953 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1954 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1955 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1956 struct xilinx_cdma_tx_segment,
1957 node);
1958 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1959 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1960 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1961 struct xilinx_axidma_tx_segment,
1962 node);
1963 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1964 } else {
1965 aximcdma_tail_segment =
1966 list_last_entry(&tail_desc->segments,
1967 struct xilinx_aximcdma_tx_segment,
1968 node);
1969 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1970 }
1971
1972 /*
1973 * Add the software descriptor and all children to the list
1974 * of pending transactions
1975 */
1976 append:
1977 list_add_tail(&desc->node, &chan->pending_list);
1978 chan->desc_pendingcount++;
1979
1980 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1981 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1982 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1983 chan->desc_pendingcount = chan->num_frms;
1984 }
1985 }
1986
1987 /**
1988 * xilinx_dma_tx_submit - Submit DMA transaction
1989 * @tx: Async transaction descriptor
1990 *
1991 * Return: cookie value on success and failure value on error
1992 */
1993 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1994 {
1995 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1996 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1997 dma_cookie_t cookie;
1998 unsigned long flags;
1999 int err;
2000
2001 if (chan->cyclic) {
2002 xilinx_dma_free_tx_descriptor(chan, desc);
2003 return -EBUSY;
2004 }
2005
2006 if (chan->err) {
2007 /*
2008 * If the reset fails, the system needs a hard reset; the
2009 * channel is no longer functional.
2010 */
2011 err = xilinx_dma_chan_reset(chan);
2012 if (err < 0)
2013 return err;
2014 }
2015
2016 spin_lock_irqsave(&chan->lock, flags);
2017
2018 cookie = dma_cookie_assign(tx);
2019
2020 /* Put this transaction onto the tail of the pending queue */
2021 append_desc_queue(chan, desc);
2022
2023 if (desc->cyclic)
2024 chan->cyclic = true;
2025
2026 chan->terminating = false;
2027
2028 spin_unlock_irqrestore(&chan->lock, flags);
2029
2030 return cookie;
2031 }
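
/*
 * Illustrative sketch (not part of this driver): once dmaengine_submit() has
 * returned a cookie, a client can poll the transfer with
 * dmaengine_tx_status() and read the residue this driver reports for
 * SG-capable channels. The helper name my_dev_bytes_left() is hypothetical.
 */
static __maybe_unused u32 my_dev_bytes_left(struct dma_chan *chan,
					    dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	/* state.residue is filled from desc->residue, computed in
	 * xilinx_dma_complete_descriptor() and the tx_status callback.
	 */
	return state.residue;
}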
2032
2033 /**
2034 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for an
2035 * interleaved DMA transaction
2036 * @dchan: DMA channel
2037 * @xt: Interleaved template pointer
2038 * @flags: transfer ack flags
2039 *
2040 * Return: Async transaction descriptor on success and NULL on failure
2041 */
2042 static struct dma_async_tx_descriptor *
2043 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
2044 struct dma_interleaved_template *xt,
2045 unsigned long flags)
2046 {
2047 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2048 struct xilinx_dma_tx_descriptor *desc;
2049 struct xilinx_vdma_tx_segment *segment;
2050 struct xilinx_vdma_desc_hw *hw;
2051
2052 if (!is_slave_direction(xt->dir))
2053 return NULL;
2054
2055 if (!xt->numf || !xt->sgl[0].size)
2056 return NULL;
2057
2058 if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
2059 xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
2060 return NULL;
2061
2062 if (xt->frame_size != 1)
2063 return NULL;
2064
2065 /* Allocate a transaction descriptor. */
2066 desc = xilinx_dma_alloc_tx_descriptor(chan);
2067 if (!desc)
2068 return NULL;
2069
2070 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2071 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2072 async_tx_ack(&desc->async_tx);
2073
2074 /* Allocate the link descriptor from DMA pool */
2075 segment = xilinx_vdma_alloc_tx_segment(chan);
2076 if (!segment)
2077 goto error;
2078
2079 /* Fill in the hardware descriptor */
2080 hw = &segment->hw;
2081 hw->vsize = xt->numf;
2082 hw->hsize = xt->sgl[0].size;
2083 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2084 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2085 hw->stride |= chan->config.frm_dly <<
2086 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2087
2088 if (xt->dir != DMA_MEM_TO_DEV) {
2089 if (chan->ext_addr) {
2090 hw->buf_addr = lower_32_bits(xt->dst_start);
2091 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2092 } else {
2093 hw->buf_addr = xt->dst_start;
2094 }
2095 } else {
2096 if (chan->ext_addr) {
2097 hw->buf_addr = lower_32_bits(xt->src_start);
2098 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2099 } else {
2100 hw->buf_addr = xt->src_start;
2101 }
2102 }
2103
2104 /* Insert the segment into the descriptor segments list. */
2105 list_add_tail(&segment->node, &desc->segments);
2106
2107 /* Link the last hardware descriptor with the first. */
2108 segment = list_first_entry(&desc->segments,
2109 struct xilinx_vdma_tx_segment, node);
2110 desc->async_tx.phys = segment->phys;
2111
2112 return &desc->async_tx;
2113
2114 error:
2115 xilinx_dma_free_tx_descriptor(chan, desc);
2116 return NULL;
2117 }
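
/*
 * Illustrative sketch (not part of this driver): how a display or capture
 * client might describe one video frame for the VDMA with an interleaved
 * template, matching the checks above (numf = number of lines, sgl[0].size =
 * bytes per line, frame_size = 1). The helper name my_vdma_prep_frame() and
 * the geometry parameters are hypothetical.
 */
static __maybe_unused struct dma_async_tx_descriptor *
my_vdma_prep_frame(struct dma_chan *chan, dma_addr_t fb, size_t width_bytes,
		   size_t height, size_t stride_bytes)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_DEV;	/* read the frame buffer out to the device */
	xt->src_start = fb;
	xt->numf = height;		/* becomes hw->vsize */
	xt->frame_size = 1;
	xt->sgl[0].size = width_bytes;	/* becomes hw->hsize */
	xt->sgl[0].icg = stride_bytes - width_bytes;	/* per-line padding */

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);

	/* The template is copied into the hardware descriptor above, so it
	 * does not need to outlive the prep call.
	 */
	kfree(xt);
	return desc;
}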
2118
2119 /**
2120 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2121 * @dchan: DMA channel
2122 * @dma_dst: destination address
2123 * @dma_src: source address
2124 * @len: transfer length
2125 * @flags: transfer ack flags
2126 *
2127 * Return: Async transaction descriptor on success and NULL on failure
2128 */
2129 static struct dma_async_tx_descriptor *
2130 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2131 dma_addr_t dma_src, size_t len, unsigned long flags)
2132 {
2133 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2134 struct xilinx_dma_tx_descriptor *desc;
2135 struct xilinx_cdma_tx_segment *segment;
2136 struct xilinx_cdma_desc_hw *hw;
2137
2138 if (!len || len > chan->xdev->max_buffer_len)
2139 return NULL;
2140
2141 desc = xilinx_dma_alloc_tx_descriptor(chan);
2142 if (!desc)
2143 return NULL;
2144
2145 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2146 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2147
2148 /* Allocate the link descriptor from DMA pool */
2149 segment = xilinx_cdma_alloc_tx_segment(chan);
2150 if (!segment)
2151 goto error;
2152
2153 hw = &segment->hw;
2154 hw->control = len;
2155 hw->src_addr = dma_src;
2156 hw->dest_addr = dma_dst;
2157 if (chan->ext_addr) {
2158 hw->src_addr_msb = upper_32_bits(dma_src);
2159 hw->dest_addr_msb = upper_32_bits(dma_dst);
2160 }
2161
2162 /* Insert the segment into the descriptor segments list. */
2163 list_add_tail(&segment->node, &desc->segments);
2164
2165 desc->async_tx.phys = segment->phys;
2166 hw->next_desc = segment->phys;
2167
2168 return &desc->async_tx;
2169
2170 error:
2171 xilinx_dma_free_tx_descriptor(chan, desc);
2172 return NULL;
2173 }
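
/*
 * Illustrative sketch (not part of this driver): a CDMA client issuing a
 * single copy through the generic dmaengine memcpy API, which lands in
 * xilinx_cdma_prep_memcpy() above. The completion-based wait and the helper
 * name my_cdma_copy() are a hypothetical usage pattern; @len must respect the
 * max_buffer_len check above. Assumes <linux/completion.h>.
 */
static void my_cdma_copy_done(void *arg)
{
	complete(arg);
}

static __maybe_unused int my_cdma_copy(struct dma_chan *chan, dma_addr_t dst,
				       dma_addr_t src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_cdma_copy_done;
	desc->callback_param = &done;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}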
2174
2175 /**
2176 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2177 * @dchan: DMA channel
2178 * @sgl: scatterlist to transfer to/from
2179 * @sg_len: number of entries in @sgl
2180 * @direction: DMA direction
2181 * @flags: transfer ack flags
2182 * @context: APP words of the descriptor
2183 *
2184 * Return: Async transaction descriptor on success and NULL on failure
2185 */
2186 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2187 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2188 enum dma_transfer_direction direction, unsigned long flags,
2189 void *context)
2190 {
2191 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2192 struct xilinx_dma_tx_descriptor *desc;
2193 struct xilinx_axidma_tx_segment *segment = NULL;
2194 u32 *app_w = (u32 *)context;
2195 struct scatterlist *sg;
2196 size_t copy;
2197 size_t sg_used;
2198 unsigned int i;
2199
2200 if (!is_slave_direction(direction))
2201 return NULL;
2202
2203 /* Allocate a transaction descriptor. */
2204 desc = xilinx_dma_alloc_tx_descriptor(chan);
2205 if (!desc)
2206 return NULL;
2207
2208 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2209 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2210
2211 /* Build transactions using information in the scatter gather list */
2212 for_each_sg(sgl, sg, sg_len, i) {
2213 sg_used = 0;
2214
2215 /* Loop until the entire scatterlist entry is used */
2216 while (sg_used < sg_dma_len(sg)) {
2217 struct xilinx_axidma_desc_hw *hw;
2218
2219 /* Get a free segment */
2220 segment = xilinx_axidma_alloc_tx_segment(chan);
2221 if (!segment)
2222 goto error;
2223
2224 /*
2225 * Calculate the maximum number of bytes to transfer,
2226 * making sure it is less than the hw limit
2227 */
2228 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2229 sg_used);
2230 hw = &segment->hw;
2231
2232 /* Fill in the descriptor */
2233 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2234 sg_used, 0);
2235
2236 hw->control = copy;
2237
2238 if (chan->direction == DMA_MEM_TO_DEV) {
2239 if (app_w)
2240 memcpy(hw->app, app_w, sizeof(u32) *
2241 XILINX_DMA_NUM_APP_WORDS);
2242 }
2243
2244 sg_used += copy;
2245
2246 /*
2247 * Insert the segment into the descriptor segments
2248 * list.
2249 */
2250 list_add_tail(&segment->node, &desc->segments);
2251 }
2252 }
2253
2254 segment = list_first_entry(&desc->segments,
2255 struct xilinx_axidma_tx_segment, node);
2256 desc->async_tx.phys = segment->phys;
2257
2258 /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
2259 if (chan->direction == DMA_MEM_TO_DEV) {
2260 segment->hw.control |= XILINX_DMA_BD_SOP;
2261 segment = list_last_entry(&desc->segments,
2262 struct xilinx_axidma_tx_segment,
2263 node);
2264 segment->hw.control |= XILINX_DMA_BD_EOP;
2265 }
2266
2267 if (chan->xdev->has_axistream_connected)
2268 desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
2269
2270 return &desc->async_tx;
2271
2272 error:
2273 xilinx_dma_free_tx_descriptor(chan, desc);
2274 return NULL;
2275 }
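
/*
 * Illustrative sketch (not part of this driver): mapping a kernel buffer into
 * a single-entry scatterlist and handing it to this prep_slave_sg
 * implementation through the generic dmaengine wrapper. The helper name
 * my_dma_tx_buf() is hypothetical; unmapping on completion is omitted.
 * Assumes <linux/scatterlist.h> and <linux/dma-mapping.h>.
 */
static __maybe_unused int my_dma_tx_buf(struct dma_chan *chan, void *buf,
					size_t len)
{
	struct device *dev = chan->device->dev;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int nents;

	sg_init_one(&sg, buf, len);
	nents = dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* Lands in xilinx_dma_prep_slave_sg(); long entries are split into
	 * several BDs according to max_buffer_len.
	 */
	desc = dmaengine_prep_slave_sg(chan, &sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dev, &sg, 1, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}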
2276
2277 /**
2278 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
2279 * @dchan: DMA channel
2280 * @buf_addr: Physical address of the buffer
2281 * @buf_len: Total length of the cyclic buffers
2282 * @period_len: length of individual cyclic buffer
2283 * @direction: DMA direction
2284 * @flags: transfer ack flags
2285 *
2286 * Return: Async transaction descriptor on success and NULL on failure
2287 */
2288 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2289 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2290 size_t period_len, enum dma_transfer_direction direction,
2291 unsigned long flags)
2292 {
2293 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2294 struct xilinx_dma_tx_descriptor *desc;
2295 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2296 size_t copy, sg_used;
2297 unsigned int num_periods;
2298 int i;
2299 u32 reg;
2300
2301 if (!period_len)
2302 return NULL;
2303
2304 num_periods = buf_len / period_len;
2305
2306 if (!num_periods)
2307 return NULL;
2308
2309 if (!is_slave_direction(direction))
2310 return NULL;
2311
2312 /* Allocate a transaction descriptor. */
2313 desc = xilinx_dma_alloc_tx_descriptor(chan);
2314 if (!desc)
2315 return NULL;
2316
2317 chan->direction = direction;
2318 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2319 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2320
2321 for (i = 0; i < num_periods; ++i) {
2322 sg_used = 0;
2323
2324 while (sg_used < period_len) {
2325 struct xilinx_axidma_desc_hw *hw;
2326
2327 /* Get a free segment */
2328 segment = xilinx_axidma_alloc_tx_segment(chan);
2329 if (!segment)
2330 goto error;
2331
2332 /*
2333 * Calculate the maximum number of bytes to transfer,
2334 * making sure it is less than the hw limit
2335 */
2336 copy = xilinx_dma_calc_copysize(chan, period_len,
2337 sg_used);
2338 hw = &segment->hw;
2339 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2340 period_len * i);
2341 hw->control = copy;
2342
2343 if (prev)
2344 prev->hw.next_desc = segment->phys;
2345
2346 prev = segment;
2347 sg_used += copy;
2348
2349 /*
2350 * Insert the segment into the descriptor segments
2351 * list.
2352 */
2353 list_add_tail(&segment->node, &desc->segments);
2354 }
2355 }
2356
2357 head_segment = list_first_entry(&desc->segments,
2358 struct xilinx_axidma_tx_segment, node);
2359 desc->async_tx.phys = head_segment->phys;
2360
2361 desc->cyclic = true;
2362 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2363 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2364 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2365
2366 segment = list_last_entry(&desc->segments,
2367 struct xilinx_axidma_tx_segment,
2368 node);
2369 segment->hw.next_desc = (u32) head_segment->phys;
2370
2371 /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
2372 if (direction == DMA_MEM_TO_DEV) {
2373 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2374 segment->hw.control |= XILINX_DMA_BD_EOP;
2375 }
2376
2377 return &desc->async_tx;
2378
2379 error:
2380 xilinx_dma_free_tx_descriptor(chan, desc);
2381 return NULL;
2382 }
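
/*
 * Illustrative sketch (not part of this driver): setting up a cyclic
 * (ring-buffer) receive, the pattern typically used by audio or ADC clients.
 * The period callback runs once per period until the channel is terminated.
 * The helper name my_dev_start_ring() is hypothetical; @ring_len should be a
 * whole number of periods, as checked above.
 */
static __maybe_unused int my_dev_start_ring(struct dma_chan *chan,
					    dma_addr_t ring, size_t ring_len,
					    size_t period_len,
					    dma_async_tx_callback period_cb,
					    void *cb_arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, ring, ring_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_cb;	/* invoked from the completion tasklet */
	desc->callback_param = cb_arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}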
2383
2384 /**
2385 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2386 * @dchan: DMA channel
2387 * @sgl: scatterlist to transfer to/from
2388 * @sg_len: number of entries in @sgl
2389 * @direction: DMA direction
2390 * @flags: transfer ack flags
2391 * @context: APP words of the descriptor
2392 *
2393 * Return: Async transaction descriptor on success and NULL on failure
2394 */
2395 static struct dma_async_tx_descriptor *
2396 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2397 unsigned int sg_len,
2398 enum dma_transfer_direction direction,
2399 unsigned long flags, void *context)
2400 {
2401 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2402 struct xilinx_dma_tx_descriptor *desc;
2403 struct xilinx_aximcdma_tx_segment *segment = NULL;
2404 u32 *app_w = (u32 *)context;
2405 struct scatterlist *sg;
2406 size_t copy;
2407 size_t sg_used;
2408 unsigned int i;
2409
2410 if (!is_slave_direction(direction))
2411 return NULL;
2412
2413 /* Allocate a transaction descriptor. */
2414 desc = xilinx_dma_alloc_tx_descriptor(chan);
2415 if (!desc)
2416 return NULL;
2417
2418 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2419 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2420
2421 /* Build transactions using information in the scatter gather list */
2422 for_each_sg(sgl, sg, sg_len, i) {
2423 sg_used = 0;
2424
2425 /* Loop until the entire scatterlist entry is used */
2426 while (sg_used < sg_dma_len(sg)) {
2427 struct xilinx_aximcdma_desc_hw *hw;
2428
2429 /* Get a free segment */
2430 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2431 if (!segment)
2432 goto error;
2433
2434 /*
2435 * Calculate the maximum number of bytes to transfer,
2436 * making sure it is less than the hw limit
2437 */
2438 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2439 chan->xdev->max_buffer_len);
2440 hw = &segment->hw;
2441
2442 /* Fill in the descriptor */
2443 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2444 sg_used);
2445 hw->control = copy;
2446
2447 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2448 memcpy(hw->app, app_w, sizeof(u32) *
2449 XILINX_DMA_NUM_APP_WORDS);
2450 }
2451
2452 sg_used += copy;
2453 /*
2454 * Insert the segment into the descriptor segments
2455 * list.
2456 */
2457 list_add_tail(&segment->node, &desc->segments);
2458 }
2459 }
2460
2461 segment = list_first_entry(&desc->segments,
2462 struct xilinx_aximcdma_tx_segment, node);
2463 desc->async_tx.phys = segment->phys;
2464
2465 /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
2466 if (chan->direction == DMA_MEM_TO_DEV) {
2467 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2468 segment = list_last_entry(&desc->segments,
2469 struct xilinx_aximcdma_tx_segment,
2470 node);
2471 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2472 }
2473
2474 return &desc->async_tx;
2475
2476 error:
2477 xilinx_dma_free_tx_descriptor(chan, desc);
2478
2479 return NULL;
2480 }
2481
2482 /**
2483 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2484 * @dchan: Driver specific DMA Channel pointer
2485 *
2486 * Return: '0' always.
2487 */
2488 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2489 {
2490 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2491 u32 reg;
2492 int err;
2493
2494 if (!chan->cyclic) {
2495 err = chan->stop_transfer(chan);
2496 if (err) {
2497 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2498 chan, dma_ctrl_read(chan,
2499 XILINX_DMA_REG_DMASR));
2500 chan->err = true;
2501 }
2502 }
2503
2504 xilinx_dma_chan_reset(chan);
2505 /* Remove and free all of the descriptors in the lists */
2506 chan->terminating = true;
2507 xilinx_dma_free_descriptors(chan);
2508 chan->idle = true;
2509
2510 if (chan->cyclic) {
2511 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2512 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2513 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2514 chan->cyclic = false;
2515 }
2516
2517 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2518 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2519 XILINX_CDMA_CR_SGMODE);
2520
2521 return 0;
2522 }
2523
2524 static void xilinx_dma_synchronize(struct dma_chan *dchan)
2525 {
2526 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2527
2528 tasklet_kill(&chan->tasklet);
2529 }
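
/*
 * Illustrative sketch (not part of this driver): the tear-down pattern that
 * exercises device_terminate_all and device_synchronize above. With this
 * driver, dmaengine_terminate_sync() halts and resets the channel, frees the
 * queued descriptors, then waits for the completion tasklet so the caller can
 * safely release its buffers. The helper name my_dev_stop() is hypothetical.
 */
static __maybe_unused void my_dev_stop(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);	/* terminate_all + synchronize */
	dma_release_channel(chan);
}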
2530
2531 /**
2532 * xilinx_vdma_channel_set_config - Configure VDMA channel
2533 * Run-time configuration for AXI VDMA, supports:
2534 * . halt the channel
2535 * . configure interrupt coalescing and inter-packet delay threshold
2536 * . start/stop parking
2537 * . enable genlock
2538 *
2539 * @dchan: DMA channel
2540 * @cfg: VDMA device configuration pointer
2541 *
2542 * Return: '0' on success and failure value on error
2543 */
2544 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2545 struct xilinx_vdma_config *cfg)
2546 {
2547 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2548 u32 dmacr;
2549
2550 if (cfg->reset)
2551 return xilinx_dma_chan_reset(chan);
2552
2553 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2554
2555 chan->config.frm_dly = cfg->frm_dly;
2556 chan->config.park = cfg->park;
2557
2558 /* genlock settings */
2559 chan->config.gen_lock = cfg->gen_lock;
2560 chan->config.master = cfg->master;
2561
2562 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2563 if (cfg->gen_lock && chan->genlock) {
2564 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2565 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2566 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2567 }
2568
2569 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2570 chan->config.vflip_en = cfg->vflip_en;
2571
2572 if (cfg->park)
2573 chan->config.park_frm = cfg->park_frm;
2574 else
2575 chan->config.park_frm = -1;
2576
2577 chan->config.coalesc = cfg->coalesc;
2578 chan->config.delay = cfg->delay;
2579
2580 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2581 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2582 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2583 chan->config.coalesc = cfg->coalesc;
2584 }
2585
2586 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2587 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2588 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2589 chan->config.delay = cfg->delay;
2590 }
2591
2592 /* FSync Source selection */
2593 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2594 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2595
2596 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2597
2598 return 0;
2599 }
2600 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
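
/*
 * Illustrative sketch (not part of this driver): a video client tuning the
 * VDMA channel through the exported helper above. The field values are
 * hypothetical; the structure comes from <linux/dma/xilinx_dma.h> and the
 * helper name my_vdma_setup() is made up for this example.
 */
static __maybe_unused int my_vdma_setup(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_dly	= 0,
		.gen_lock	= 1,	/* honoured only if the channel has genlock */
		.master		= 0,
		.park		= 0,	/* circulate through all frame buffers */
		.coalesc	= 1,	/* interrupt after every frame */
		.delay		= 0,
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}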
2601
2602 /* -----------------------------------------------------------------------------
2603 * Probe and remove
2604 */
2605
2606 /**
2607 * xilinx_dma_chan_remove - Per Channel remove function
2608 * @chan: Driver specific DMA channel
2609 */
2610 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2611 {
2612 /* Disable all interrupts */
2613 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2614 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2615
2616 if (chan->irq > 0)
2617 free_irq(chan->irq, chan);
2618
2619 tasklet_kill(&chan->tasklet);
2620
2621 list_del(&chan->common.device_node);
2622 }
2623
2624 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2625 struct clk **tx_clk, struct clk **rx_clk,
2626 struct clk **sg_clk, struct clk **tmp_clk)
2627 {
2628 int err;
2629
2630 *tmp_clk = NULL;
2631
2632 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2633 if (IS_ERR(*axi_clk))
2634 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2635
2636 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2637 if (IS_ERR(*tx_clk))
2638 *tx_clk = NULL;
2639
2640 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2641 if (IS_ERR(*rx_clk))
2642 *rx_clk = NULL;
2643
2644 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2645 if (IS_ERR(*sg_clk))
2646 *sg_clk = NULL;
2647
2648 err = clk_prepare_enable(*axi_clk);
2649 if (err) {
2650 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2651 return err;
2652 }
2653
2654 err = clk_prepare_enable(*tx_clk);
2655 if (err) {
2656 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2657 goto err_disable_axiclk;
2658 }
2659
2660 err = clk_prepare_enable(*rx_clk);
2661 if (err) {
2662 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2663 goto err_disable_txclk;
2664 }
2665
2666 err = clk_prepare_enable(*sg_clk);
2667 if (err) {
2668 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2669 goto err_disable_rxclk;
2670 }
2671
2672 return 0;
2673
2674 err_disable_rxclk:
2675 clk_disable_unprepare(*rx_clk);
2676 err_disable_txclk:
2677 clk_disable_unprepare(*tx_clk);
2678 err_disable_axiclk:
2679 clk_disable_unprepare(*axi_clk);
2680
2681 return err;
2682 }
2683
2684 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2685 struct clk **dev_clk, struct clk **tmp_clk,
2686 struct clk **tmp1_clk, struct clk **tmp2_clk)
2687 {
2688 int err;
2689
2690 *tmp_clk = NULL;
2691 *tmp1_clk = NULL;
2692 *tmp2_clk = NULL;
2693
2694 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2695 if (IS_ERR(*axi_clk))
2696 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2697
2698 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2699 if (IS_ERR(*dev_clk))
2700 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2701
2702 err = clk_prepare_enable(*axi_clk);
2703 if (err) {
2704 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2705 return err;
2706 }
2707
2708 err = clk_prepare_enable(*dev_clk);
2709 if (err) {
2710 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2711 goto err_disable_axiclk;
2712 }
2713
2714 return 0;
2715
2716 err_disable_axiclk:
2717 clk_disable_unprepare(*axi_clk);
2718
2719 return err;
2720 }
2721
2722 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2723 struct clk **tx_clk, struct clk **txs_clk,
2724 struct clk **rx_clk, struct clk **rxs_clk)
2725 {
2726 int err;
2727
2728 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2729 if (IS_ERR(*axi_clk))
2730 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2731
2732 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2733 if (IS_ERR(*tx_clk))
2734 *tx_clk = NULL;
2735
2736 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2737 if (IS_ERR(*txs_clk))
2738 *txs_clk = NULL;
2739
2740 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2741 if (IS_ERR(*rx_clk))
2742 *rx_clk = NULL;
2743
2744 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2745 if (IS_ERR(*rxs_clk))
2746 *rxs_clk = NULL;
2747
2748 err = clk_prepare_enable(*axi_clk);
2749 if (err) {
2750 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2751 err);
2752 return err;
2753 }
2754
2755 err = clk_prepare_enable(*tx_clk);
2756 if (err) {
2757 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2758 goto err_disable_axiclk;
2759 }
2760
2761 err = clk_prepare_enable(*txs_clk);
2762 if (err) {
2763 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2764 goto err_disable_txclk;
2765 }
2766
2767 err = clk_prepare_enable(*rx_clk);
2768 if (err) {
2769 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2770 goto err_disable_txsclk;
2771 }
2772
2773 err = clk_prepare_enable(*rxs_clk);
2774 if (err) {
2775 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2776 goto err_disable_rxclk;
2777 }
2778
2779 return 0;
2780
2781 err_disable_rxclk:
2782 clk_disable_unprepare(*rx_clk);
2783 err_disable_txsclk:
2784 clk_disable_unprepare(*txs_clk);
2785 err_disable_txclk:
2786 clk_disable_unprepare(*tx_clk);
2787 err_disable_axiclk:
2788 clk_disable_unprepare(*axi_clk);
2789
2790 return err;
2791 }
2792
2793 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2794 {
2795 clk_disable_unprepare(xdev->rxs_clk);
2796 clk_disable_unprepare(xdev->rx_clk);
2797 clk_disable_unprepare(xdev->txs_clk);
2798 clk_disable_unprepare(xdev->tx_clk);
2799 clk_disable_unprepare(xdev->axi_clk);
2800 }
2801
2802 /**
2803 * xilinx_dma_chan_probe - Per Channel Probing
2804 * It gets the channel features from the device tree entry and
2805 * initializes the special channel handling routines.
2806 *
2807 * @xdev: Driver specific device structure
2808 * @node: Device node
2809 *
2810 * Return: '0' on success and failure value on error
2811 */
2812 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2813 struct device_node *node)
2814 {
2815 struct xilinx_dma_chan *chan;
2816 bool has_dre = false;
2817 u32 value, width;
2818 int err;
2819
2820 /* Allocate and initialize the channel structure */
2821 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2822 if (!chan)
2823 return -ENOMEM;
2824
2825 chan->dev = xdev->dev;
2826 chan->xdev = xdev;
2827 chan->desc_pendingcount = 0x0;
2828 chan->ext_addr = xdev->ext_addr;
2829 /* This variable ensures that descriptors are not
2830 * submitted when the DMA engine is in progress. It is added to
2831 * avoid polling a bit in the status register to know the DMA
2832 * state in the driver hot path.
2833 */
2834 chan->idle = true;
2835
2836 spin_lock_init(&chan->lock);
2837 INIT_LIST_HEAD(&chan->pending_list);
2838 INIT_LIST_HEAD(&chan->done_list);
2839 INIT_LIST_HEAD(&chan->active_list);
2840 INIT_LIST_HEAD(&chan->free_seg_list);
2841
2842 /* Retrieve the channel properties from the device tree */
2843 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2844
2845 of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
2846
2847 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2848
2849 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2850 if (err) {
2851 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2852 return err;
2853 }
2854 width = value >> 3; /* Convert bits to bytes */
2855
2856 /* If data width is greater than 8 bytes, DRE is not in hw */
2857 if (width > 8)
2858 has_dre = false;
2859
2860 if (!has_dre)
2861 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2862
2863 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2864 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2865 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2866 chan->direction = DMA_MEM_TO_DEV;
2867 chan->id = xdev->mm2s_chan_id++;
2868 chan->tdest = chan->id;
2869
2870 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2871 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2872 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2873 chan->config.park = 1;
2874
2875 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2876 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2877 chan->flush_on_fsync = true;
2878 }
2879 } else if (of_device_is_compatible(node,
2880 "xlnx,axi-vdma-s2mm-channel") ||
2881 of_device_is_compatible(node,
2882 "xlnx,axi-dma-s2mm-channel")) {
2883 chan->direction = DMA_DEV_TO_MEM;
2884 chan->id = xdev->s2mm_chan_id++;
2885 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2886 chan->has_vflip = of_property_read_bool(node,
2887 "xlnx,enable-vert-flip");
2888 if (chan->has_vflip) {
2889 chan->config.vflip_en = dma_read(chan,
2890 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2891 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2892 }
2893
2894 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2895 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2896 else
2897 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2898
2899 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2900 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2901 chan->config.park = 1;
2902
2903 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2904 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2905 chan->flush_on_fsync = true;
2906 }
2907 } else {
2908 dev_err(xdev->dev, "Invalid channel compatible node\n");
2909 return -EINVAL;
2910 }
2911
2912 xdev->common.directions |= chan->direction;
2913
2914 /* Request the interrupt */
2915 chan->irq = of_irq_get(node, chan->tdest);
2916 if (chan->irq < 0)
2917 return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
2918 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2919 IRQF_SHARED, "xilinx-dma-controller", chan);
2920 if (err) {
2921 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2922 return err;
2923 }
2924
2925 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2926 chan->start_transfer = xilinx_dma_start_transfer;
2927 chan->stop_transfer = xilinx_dma_stop_transfer;
2928 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2929 chan->start_transfer = xilinx_mcdma_start_transfer;
2930 chan->stop_transfer = xilinx_dma_stop_transfer;
2931 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2932 chan->start_transfer = xilinx_cdma_start_transfer;
2933 chan->stop_transfer = xilinx_cdma_stop_transfer;
2934 } else {
2935 chan->start_transfer = xilinx_vdma_start_transfer;
2936 chan->stop_transfer = xilinx_dma_stop_transfer;
2937 }
2938
2939 /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
2940 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2941 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
2942 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2943 XILINX_DMA_DMASR_SG_MASK)
2944 chan->has_sg = true;
2945 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2946 str_enabled_disabled(chan->has_sg));
2947 }
2948
2949 /* Initialize the tasklet */
2950 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
2951
2952 /*
2953 * Initialize the DMA channel and add it to the DMA engine channels
2954 * list.
2955 */
2956 chan->common.device = &xdev->common;
2957
2958 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2959 xdev->chan[chan->id] = chan;
2960
2961 /* Reset the channel */
2962 err = xilinx_dma_chan_reset(chan);
2963 if (err < 0) {
2964 dev_err(xdev->dev, "Reset channel failed\n");
2965 return err;
2966 }
2967
2968 return 0;
2969 }
2970
2971 /**
2972 * xilinx_dma_child_probe - Per child node probe
2973 * It gets the number of dma-channels per child node from the
2974 * device-tree and initializes all the channels.
2975 *
2976 * @xdev: Driver specific device structure
2977 * @node: Device node
2978 *
2979 * Return: '0' on success and failure value on error.
2980 */
2981 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2982 struct device_node *node)
2983 {
2984 int ret, i;
2985 u32 nr_channels = 1;
2986
2987 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2988 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2989 dev_warn(xdev->dev, "missing dma-channels property\n");
2990
2991 for (i = 0; i < nr_channels; i++) {
2992 ret = xilinx_dma_chan_probe(xdev, node);
2993 if (ret)
2994 return ret;
2995 }
2996
2997 return 0;
2998 }
2999
3000 /**
3001 * of_dma_xilinx_xlate - Translation function
3002 * @dma_spec: Pointer to DMA specifier as found in the device tree
3003 * @ofdma: Pointer to DMA controller data
3004 *
3005 * Return: DMA channel pointer on success and NULL on error
3006 */
3007 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
3008 struct of_dma *ofdma)
3009 {
3010 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
3011 int chan_id = dma_spec->args[0];
3012
3013 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
3014 return NULL;
3015
3016 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
3017 }
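
/*
 * Illustrative device tree fragment (hypothetical node names and addresses):
 * the single cell after the controller phandle becomes dma_spec->args[0]
 * above, i.e. the channel index, with MM2S channels first and S2MM channels
 * starting at max_channels / 2 (0 = MM2S and 1 = S2MM for a two-channel
 * AXI DMA).
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		...
 *	};
 *
 *	client {
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */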
3018
3019 static const struct xilinx_dma_config axidma_config = {
3020 .dmatype = XDMA_TYPE_AXIDMA,
3021 .clk_init = axidma_clk_init,
3022 .irq_handler = xilinx_dma_irq_handler,
3023 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3024 };
3025
3026 static const struct xilinx_dma_config aximcdma_config = {
3027 .dmatype = XDMA_TYPE_AXIMCDMA,
3028 .clk_init = axidma_clk_init,
3029 .irq_handler = xilinx_mcdma_irq_handler,
3030 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
3031 };
3032 static const struct xilinx_dma_config axicdma_config = {
3033 .dmatype = XDMA_TYPE_CDMA,
3034 .clk_init = axicdma_clk_init,
3035 .irq_handler = xilinx_dma_irq_handler,
3036 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
3037 };
3038
3039 static const struct xilinx_dma_config axivdma_config = {
3040 .dmatype = XDMA_TYPE_VDMA,
3041 .clk_init = axivdma_clk_init,
3042 .irq_handler = xilinx_dma_irq_handler,
3043 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
3044 };
3045
3046 static const struct of_device_id xilinx_dma_of_ids[] = {
3047 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
3048 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
3049 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
3050 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
3051 {}
3052 };
3053 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
3054
3055 /**
3056 * xilinx_dma_probe - Driver probe function
3057 * @pdev: Pointer to the platform_device structure
3058 *
3059 * Return: '0' on success and failure value on error
3060 */
3061 static int xilinx_dma_probe(struct platform_device *pdev)
3062 {
3063 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
3064 struct clk **, struct clk **, struct clk **)
3065 = axivdma_clk_init;
3066 struct device_node *node = pdev->dev.of_node;
3067 struct xilinx_dma_device *xdev;
3068 struct device_node *child, *np = pdev->dev.of_node;
3069 u32 num_frames, addr_width, len_width;
3070 int i, err;
3071
3072 /* Allocate and initialize the DMA engine structure */
3073 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
3074 if (!xdev)
3075 return -ENOMEM;
3076
3077 xdev->dev = &pdev->dev;
3078 if (np) {
3079 const struct of_device_id *match;
3080
3081 match = of_match_node(xilinx_dma_of_ids, np);
3082 if (match && match->data) {
3083 xdev->dma_config = match->data;
3084 clk_init = xdev->dma_config->clk_init;
3085 }
3086 }
3087
3088 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3089 &xdev->rx_clk, &xdev->rxs_clk);
3090 if (err)
3091 return err;
3092
3093 /* Request and map I/O memory */
3094 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3095 if (IS_ERR(xdev->regs)) {
3096 err = PTR_ERR(xdev->regs);
3097 goto disable_clks;
3098 }
3099 /* Retrieve the DMA engine properties from the device tree */
3100 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3101 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3102
3103 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3104 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3105 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3106 &len_width)) {
3107 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3108 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3109 dev_warn(xdev->dev,
3110 "invalid xlnx,sg-length-width property value. Using default width\n");
3111 } else {
3112 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3113 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3114 xdev->max_buffer_len =
3115 GENMASK(len_width - 1, 0);
3116 }
3117 }
3118 }
3119
3120 dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
3121
3122 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3123 xdev->has_axistream_connected =
3124 of_property_read_bool(node, "xlnx,axistream-connected");
3125 }
3126
3127 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3128 err = of_property_read_u32(node, "xlnx,num-fstores",
3129 &num_frames);
3130 if (err < 0) {
3131 dev_err(xdev->dev,
3132 "missing xlnx,num-fstores property\n");
3133 goto disable_clks;
3134 }
3135
3136 err = of_property_read_u32(node, "xlnx,flush-fsync",
3137 &xdev->flush_on_fsync);
3138 if (err < 0)
3139 dev_warn(xdev->dev,
3140 "missing xlnx,flush-fsync property\n");
3141 }
3142
3143 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3144 if (err < 0)
3145 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3146
3147 if (addr_width > 32)
3148 xdev->ext_addr = true;
3149 else
3150 xdev->ext_addr = false;
3151
3152 /* Set metadata mode */
3153 if (xdev->has_axistream_connected)
3154 xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
3155
3156 /* Set the dma mask bits */
3157 err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3158 if (err < 0) {
3159 dev_err(xdev->dev, "DMA mask error %d\n", err);
3160 goto disable_clks;
3161 }
3162
3163 /* Initialize the DMA engine */
3164 xdev->common.dev = &pdev->dev;
3165
3166 INIT_LIST_HEAD(&xdev->common.channels);
3167 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
3168 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3169 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3170 }
3171
3172 xdev->common.device_alloc_chan_resources =
3173 xilinx_dma_alloc_chan_resources;
3174 xdev->common.device_free_chan_resources =
3175 xilinx_dma_free_chan_resources;
3176 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3177 xdev->common.device_synchronize = xilinx_dma_synchronize;
3178 xdev->common.device_tx_status = xilinx_dma_tx_status;
3179 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3180 xdev->common.device_config = xilinx_dma_device_config;
3181 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3182 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3183 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3184 xdev->common.device_prep_dma_cyclic =
3185 xilinx_dma_prep_dma_cyclic;
3186 /* Residue calculation is supported by only AXI DMA and CDMA */
3187 xdev->common.residue_granularity =
3188 DMA_RESIDUE_GRANULARITY_SEGMENT;
3189 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3190 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3191 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3192 /* Residue calculation is supported by only AXI DMA and CDMA */
3193 xdev->common.residue_granularity =
3194 DMA_RESIDUE_GRANULARITY_SEGMENT;
3195 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3196 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3197 } else {
3198 xdev->common.device_prep_interleaved_dma =
3199 xilinx_vdma_dma_prep_interleaved;
3200 }
3201
3202 platform_set_drvdata(pdev, xdev);
3203
3204 /* Initialize the channels */
3205 for_each_child_of_node(node, child) {
3206 err = xilinx_dma_child_probe(xdev, child);
3207 if (err < 0) {
3208 of_node_put(child);
3209 goto error;
3210 }
3211 }
3212
3213 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3214 for (i = 0; i < xdev->dma_config->max_channels; i++)
3215 if (xdev->chan[i])
3216 xdev->chan[i]->num_frms = num_frames;
3217 }
3218
3219 /* Register the DMA engine with the core */
3220 err = dma_async_device_register(&xdev->common);
3221 if (err) {
3222 dev_err(xdev->dev, "failed to register the dma device\n");
3223 goto error;
3224 }
3225
3226 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3227 xdev);
3228 if (err < 0) {
3229 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3230 dma_async_device_unregister(&xdev->common);
3231 goto error;
3232 }
3233
3234 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3235 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3236 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3237 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3238 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3239 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3240 else
3241 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3242
3243 return 0;
3244
3245 error:
3246 for (i = 0; i < xdev->dma_config->max_channels; i++)
3247 if (xdev->chan[i])
3248 xilinx_dma_chan_remove(xdev->chan[i]);
3249 disable_clks:
3250 xdma_disable_allclks(xdev);
3251
3252 return err;
3253 }
3254
3255 /**
3256 * xilinx_dma_remove - Driver remove function
3257 * @pdev: Pointer to the platform_device structure
3258 */
3259 static void xilinx_dma_remove(struct platform_device *pdev)
3260 {
3261 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3262 int i;
3263
3264 of_dma_controller_free(pdev->dev.of_node);
3265
3266 dma_async_device_unregister(&xdev->common);
3267
3268 for (i = 0; i < xdev->dma_config->max_channels; i++)
3269 if (xdev->chan[i])
3270 xilinx_dma_chan_remove(xdev->chan[i]);
3271
3272 xdma_disable_allclks(xdev);
3273 }
3274
3275 static struct platform_driver xilinx_vdma_driver = {
3276 .driver = {
3277 .name = "xilinx-vdma",
3278 .of_match_table = xilinx_dma_of_ids,
3279 },
3280 .probe = xilinx_dma_probe,
3281 .remove = xilinx_dma_remove,
3282 };
3283
3284 module_platform_driver(xilinx_vdma_driver);
3285
3286 MODULE_AUTHOR("Xilinx, Inc.");
3287 MODULE_DESCRIPTION("Xilinx VDMA driver");
3288 MODULE_LICENSE("GPL v2");
3289