1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Copyright 2013-2014 Freescale Semiconductor, Inc.
4 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
5 */
6 #ifndef _FSL_EDMA_COMMON_H_
7 #define _FSL_EDMA_COMMON_H_
8
9 #include <linux/dma-direction.h>
10 #include <linux/platform_device.h>
11 #include "virt-dma.h"
12
/* eDMA Control Register (CR) bits */
#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

/* 5-bit channel-number fields of the SEEI/CEEI/CINT/CERR registers */
#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))
27
/* TCD ATTR field: source/destination transfer size and address modulo */
#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & GENMASK(2, 0)))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)

/* 15-bit major-loop iteration count fields (CITER/BITER) */
#define EDMA_TCD_ITER_MASK		GENMASK(14, 0)
#define EDMA_TCD_CITER_CITER(x)		((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_BITER_BITER(x)		((x) & EDMA_TCD_ITER_MASK)

/* TCD Control and Status (CSR) bits */
#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)
45
/*
 * eDMA v3 TCD NBYTES field when minor-loop offsets are in use:
 * bits [9:0] hold the minor-loop byte count, the offset goes in from
 * bit 10, and DMLOE/SMLOE enable the destination/source offset.
 */
#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
/* Argument parenthesized so expression arguments expand safely */
#define EDMA_V3_TCD_NBYTES_MLOFF(x)	((x) << 10)
/* BIT() avoids the signed-overflow UB of (1 << 31) */
#define EDMA_V3_TCD_NBYTES_DMLOE	BIT(30)
#define EDMA_V3_TCD_NBYTES_SMLOE	BIT(31)
50
/* DMAMUX channel configuration register (pre-v3 parts) */
#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

/* Maximum number of DMAMUX blocks per engine */
#define DMAMUX_NR	2

/* Offset of the TCD region within the eDMA register space */
#define EDMA_TCD	0x1000
58
/* Bus-width capabilities advertised to the dmaengine core */
#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
63
/* eDMA v3 per-channel SBR/CSR bits and error-status bits */
#define EDMA_V3_CH_SBR_RD		BIT(22)
#define EDMA_V3_CH_SBR_WR		BIT(21)
#define EDMA_V3_CH_CSR_ERQ		BIT(0)
#define EDMA_V3_CH_CSR_EARQ		BIT(1)
#define EDMA_V3_CH_CSR_EEI		BIT(2)
#define EDMA_V3_CH_CSR_DONE		BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE		BIT(31)
#define EDMA_V3_CH_ES_ERR		BIT(31)
#define EDMA_V3_MP_ES_VLD		BIT(31)

/* Per-channel error-status (CHn_ES) error-cause bits */
#define EDMA_V3_CH_ERR_DBE		BIT(0)
#define EDMA_V3_CH_ERR_SBE		BIT(1)
#define EDMA_V3_CH_ERR_SGE		BIT(2)
#define EDMA_V3_CH_ERR_NCE		BIT(3)
#define EDMA_V3_CH_ERR_DOE		BIT(4)
#define EDMA_V3_CH_ERR_DAE		BIT(5)
#define EDMA_V3_CH_ERR_SOE		BIT(6)
#define EDMA_V3_CH_ERR_SAE		BIT(7)
#define EDMA_V3_CH_ERR_ECX		BIT(8)
#define EDMA_V3_CH_ERR_UCE		BIT(9)
#define EDMA_V3_CH_ERR			BIT(31)
85
/* Software power-management state of the eDMA engine */
enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
90
/*
 * Hardware Transfer Control Descriptor, 32-bit address variant.
 * Values are stored little-endian (__le*) in memory/registers; use the
 * edma_read*/edma_write* helpers, which handle IP endianness.
 */
struct fsl_edma_hw_tcd {
	__le32 saddr;		/* source address */
	__le16 soff;		/* source address offset */
	__le16 attr;		/* EDMA_TCD_ATTR_* transfer attributes */
	__le32 nbytes;		/* minor-loop byte count */
	__le32 slast;		/* last source address adjustment */
	__le32 daddr;		/* destination address */
	__le16 doff;		/* destination address offset */
	__le16 citer;		/* current major iteration count */
	__le32 dlast_sga;	/* last dest adjustment / scatter-gather link */
	__le16 csr;		/* EDMA_TCD_CSR_* control and status */
	__le16 biter;		/* beginning major iteration count */
};
104
/*
 * Hardware TCD, 64-bit address variant (FSL_EDMA_DRV_TCD64 parts).
 * Note the field order differs from the 32-bit layout; __packed keeps
 * the struct exactly matching the hardware register map.
 */
struct fsl_edma_hw_tcd64 {
	__le64 saddr;		/* source address */
	__le16 soff;		/* source address offset */
	__le16 attr;		/* EDMA_TCD_ATTR_* transfer attributes */
	__le32 nbytes;		/* minor-loop byte count */
	__le64 slast;		/* last source address adjustment */
	__le64 daddr;		/* destination address */
	__le64 dlast_sga;	/* last dest adjustment / scatter-gather link */
	__le16 doff;		/* destination address offset */
	__le16 citer;		/* current major iteration count */
	__le16 csr;		/* EDMA_TCD_CSR_* control and status */
	__le16 biter;		/* beginning major iteration count */
} __packed;
118
/*
 * eDMA v3/v4 per-channel register block: channel control/status
 * registers followed by that channel's TCD (either layout, hence the
 * union).  chan->tcd points at the tcd member; container_of() recovers
 * this struct (see edma_readl_chreg/edma_writel_chreg).
 */
struct fsl_edma3_ch_reg {
	__le32	ch_csr;
	__le32	ch_es;
	__le32	ch_int;
	__le32	ch_sbr;
	__le32	ch_pri;
	__le32	ch_mux;
	__le32  ch_mattr; /* edma4, reserved for edma3 */
	__le32  ch_reserved;
	union {
		struct fsl_edma_hw_tcd tcd;
		struct fsl_edma_hw_tcd64 tcd64;
	};
} __packed;
133
134 /*
135 * These are iomem pointers, for both v32 and v64.
136 */
/*
 * Pre-resolved iomem pointers to the engine-wide registers, filled in
 * by fsl_edma_setup_regs() for both the v32 and v64 register maps.
 * The *h/*l pairs are the high/low halves on 64-channel parts.
 */
struct edma_regs {
	void __iomem *cr;
	void __iomem *es;
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;
	void __iomem *ceei;
	void __iomem *serq;
	void __iomem *cerq;
	void __iomem *cint;
	void __iomem *cerr;
	void __iomem *ssrt;
	void __iomem *cdne;
	void __iomem *inth;
	void __iomem *intl;
	void __iomem *errh;
	void __iomem *errl;
};
157
/* A hardware TCD allocated from the channel's dma_pool: CPU view plus
 * the DMA (bus) address used for scatter-gather linking. */
struct fsl_edma_sw_tcd {
	dma_addr_t	ptcd;	/* DMA address of the TCD */
	void		*vtcd;	/* CPU virtual address of the TCD */
};
162
/* Per-channel state. */
struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	struct fsl_edma_engine		*edma;		/* owning engine */
	struct fsl_edma_desc		*edesc;		/* current descriptor */
	struct dma_slave_config		cfg;
	u32				attr;
	bool                            is_sw;
	struct dma_pool			*tcd_pool;	/* pool of hardware TCDs */
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	char				errirq_name[36];
	void __iomem			*tcd;		/* this channel's TCD registers */
	void __iomem			*mux_addr;
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;	/* power-domain device */
	struct device_link		*pd_dev_link;
	u32				srcid;		/* DMA request source id */
	struct clk			*clk;
	int                             priority;
	int                             hw_chanid;
	int                             txirq;
	int                             errirq;
	irqreturn_t			(*irq_handler)(int irq, void *dev_id);
	irqreturn_t			(*errirq_handler)(int irq, void *dev_id);
	bool				is_rxchan;
	bool				is_remote;
	bool				is_multi_fifo;
};
197
/* Software descriptor: a virt-dma descriptor plus its chain of TCDs. */
struct fsl_edma_desc {
	struct virt_dma_desc	vdesc;
	struct fsl_edma_chan	*echan;		/* channel this desc belongs to */
	bool			iscyclic;
	enum dma_transfer_direction	dirn;
	unsigned int		n_tcds;		/* entries in tcd[] */
	struct fsl_edma_sw_tcd	tcd[];		/* one TCD per segment */
};
206
/*
 * Per-SoC capability/quirk flags, carried in fsl_edma_drvdata->flags
 * and queried through fsl_edma_drvflags().
 */
#define FSL_EDMA_DRV_HAS_DMACLK		BIT(0)
#define FSL_EDMA_DRV_MUX_SWAP		BIT(1)
#define FSL_EDMA_DRV_CONFIG32		BIT(2)
#define FSL_EDMA_DRV_WRAP_IO		BIT(3)
#define FSL_EDMA_DRV_EDMA64		BIT(4)
#define FSL_EDMA_DRV_HAS_PD		BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK		BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX		BIT(7)
#define FSL_EDMA_DRV_MEM_REMOTE		BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG		BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE		BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV		BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE	BIT(12)
/* Need clean CHn_CSR DONE before enable TCD's ESG */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG	BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK	BIT(14)
#define FSL_EDMA_DRV_TCD64		BIT(15)
/* All channel ERR IRQ share one IRQ line */
#define FSL_EDMA_DRV_ERRIRQ_SHARE	BIT(16)

/* Flag sets for whole IP generations; note edma4 drops CLEAR_DONE_E_SG */
#define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_SG |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

#define FSL_EDMA_DRV_EDMA4	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
242
/* Static, per-compatible description of an eDMA variant. */
struct fsl_edma_drvdata {
	u32			dmamuxs;	/* only used before v3 */
	u32			chreg_off;	/* offset of channel registers */
	u32			chreg_space_sz;	/* per-channel register stride */
	u32			flags;		/* FSL_EDMA_DRV_* capability flags */
	u32			mux_off;	/* channel mux register offset */
	u32			mux_skip;	/* how much skip for each channel */
	int			(*setup_irq)(struct platform_device *pdev,
					     struct fsl_edma_engine *fsl_edma);
};
253
/* Top-level state for one eDMA controller instance. */
struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;	/* engine register base */
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;
	u32			n_chans;	/* number of entries in chans[] */
	int			txirq;
	int			txirq_16_31;
	int			errirq;
	bool			big_endian;	/* IP is big-endian; see edma_read*/edma_write* */
	struct edma_regs	regs;
	u64			chan_masked;	/* NOTE(review): presumably a bitmask of reserved channels - confirm */
	struct fsl_edma_chan	chans[] __counted_by(n_chans);
};
271
fsl_edma_drvflags(struct fsl_edma_chan * fsl_chan)272 static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
273 {
274 return fsl_chan->edma->drvdata->flags;
275 }
276
/*
 * TCD register accessors.  chan->tcd points at either a
 * fsl_edma_hw_tcd (32-bit addresses) or fsl_edma_hw_tcd64 layout,
 * selected at runtime by FSL_EDMA_DRV_TCD64.  _Generic dispatches on
 * the field's __le* width to pick the matching edma_read*/edma_write*
 * helper, which also handles IP endianness.
 */
#define edma_read_tcdreg_c(chan, _tcd, __name)				\
_Generic(((_tcd)->__name),						\
	__iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name),		\
	__iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name),		\
	__iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name)		\
	)

#define edma_read_tcdreg(chan, __name)								\
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ?						\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) :	\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name)		\
)

#define edma_write_tcdreg_c(chan, _tcd, _val, __name)				\
_Generic((_tcd->__name),							\
	__iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name),	\
	__iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name),	\
	__iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name),	\
	__iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name)		\
	)

/* Write one TCD register of the channel, whichever TCD layout is in use. */
#define edma_write_tcdreg(chan, val, __name)							\
do {											\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;	\
											\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)				\
		edma_write_tcdreg_c(chan, tcd64_r, val, __name);			\
	else										\
		edma_write_tcdreg_c(chan, tcd_r, val, __name);				\
} while (0)

/* Copy one field of an in-memory TCD image (__tcd) into the hardware TCD. */
#define edma_cp_tcd_to_reg(chan, __tcd, __name)							\
do {	\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd;			\
	struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd;			\
											\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)				\
		edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name);		\
	else										\
		edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name);		\
} while (0)
321
/*
 * Read/write a 32-bit per-channel register (edma3 split-reg layout):
 * chan->tcd points at the TCD embedded in fsl_edma3_ch_reg, so
 * container_of() recovers the base of the channel register block.
 */
#define edma_readl_chreg(chan, __name)				\
	edma_readl(chan->edma,					\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						  struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val,  __name)			\
	edma_writel(chan->edma, val,				\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						  struct fsl_edma3_ch_reg, tcd)->__name))
331
/* Fetch a field from a CPU-memory TCD image, for either TCD layout. */
#define fsl_edma_get_tcd(_chan, _tcd, _field)			\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) :	\
						 (((struct fsl_edma_hw_tcd *)_tcd)->_field))

/* Convert a __le16/__le32/__le64 value to CPU endianness by type. */
#define fsl_edma_le_to_cpu(x)						\
_Generic((x),								\
	__le64 : le64_to_cpu((x)),					\
	__le32 : le32_to_cpu((x)),					\
	__le16 : le16_to_cpu((x))					\
)

/* As fsl_edma_get_tcd(), but returning the value in CPU endianness. */
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field)				\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?				\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) :	\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
347
/* Store a CPU-endian value into a TCD image field as little-endian,
 * picking the conversion by the field's __le* type. */
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field)			\
_Generic(((_tcd)->_field),						\
	__le64 : (_tcd)->_field = cpu_to_le64(_val),			\
	__le32 : (_tcd)->_field = cpu_to_le32(_val),			\
	__le16 : (_tcd)->_field = cpu_to_le16(_val)			\
)

/* Layout-aware wrapper around fsl_edma_set_tcd_to_le_c(). */
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field)	\
do {								\
	if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64)	\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field);	\
	else							\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field);		\
} while (0)
362
/* Must be included after the struct definitions above. */
364 #include "fsl-edma-trace.h"
365
/*
 * R/W functions for big- or little-endian registers:
 * The eDMA controller's endianness is independent of the CPU core's.
 * For the big-endian IP module, the offset for 8-bit or 16-bit registers
 * should also be swapped opposite to that in the little-endian IP.
 */
edma_readq(struct fsl_edma_engine * edma,void __iomem * addr)372 static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
373 {
374 u64 l, h;
375
376 if (edma->big_endian) {
377 l = ioread32be(addr);
378 h = ioread32be(addr + 4);
379 } else {
380 l = ioread32(addr);
381 h = ioread32(addr + 4);
382 }
383
384 trace_edma_readl(edma, addr, l);
385 trace_edma_readl(edma, addr + 4, h);
386
387 return (h << 32) | l;
388 }
389
edma_readl(struct fsl_edma_engine * edma,void __iomem * addr)390 static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
391 {
392 u32 val;
393
394 if (edma->big_endian)
395 val = ioread32be(addr);
396 else
397 val = ioread32(addr);
398
399 trace_edma_readl(edma, addr, val);
400
401 return val;
402 }
403
edma_readw(struct fsl_edma_engine * edma,void __iomem * addr)404 static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
405 {
406 u16 val;
407
408 if (edma->big_endian)
409 val = ioread16be(addr);
410 else
411 val = ioread16(addr);
412
413 trace_edma_readw(edma, addr, val);
414
415 return val;
416 }
417
/* Write an 8-bit eDMA register; byte registers sit at a byte-swapped
 * offset when the IP is big-endian. */
static inline void edma_writeb(struct fsl_edma_engine *edma,
			       u8 val, void __iomem *addr)
{
	void __iomem *target = addr;

	if (edma->big_endian)
		target = (void __iomem *)((unsigned long)addr ^ 0x3);

	iowrite8(val, target);

	trace_edma_writeb(edma, addr, val);
}
429
/* Write a 16-bit eDMA register; 16-bit registers sit at a swapped
 * offset when the IP is big-endian. */
static inline void edma_writew(struct fsl_edma_engine *edma,
			       u16 val, void __iomem *addr)
{
	if (!edma->big_endian) {
		iowrite16(val, addr);
	} else {
		void __iomem *target = (void __iomem *)((unsigned long)addr ^ 0x2);

		iowrite16be(val, target);
	}

	trace_edma_writew(edma, addr, val);
}
441
/* Write a 32-bit eDMA register, honouring the controller's endianness. */
static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (!edma->big_endian)
		iowrite32(val, addr);
	else
		iowrite32be(val, addr);

	trace_edma_writel(edma, addr, val);
}
452
/* Write a 64-bit register as two 32-bit halves (low word at addr). */
static inline void edma_writeq(struct fsl_edma_engine *edma,
			       u64 val, void __iomem *addr)
{
	u32 lo = val & 0xFFFFFFFF;
	u32 hi = val >> 32;

	if (edma->big_endian) {
		iowrite32be(lo, addr);
		iowrite32be(hi, addr + 4);
	} else {
		iowrite32(lo, addr);
		iowrite32(hi, addr + 4);
	}

	trace_edma_writel(edma, addr, lo);
	trace_edma_writel(edma, addr + 4, hi);
}
467
/* Convert a generic dma_chan back to its containing fsl_edma_chan. */
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
472
/* Convert a virt_dma_desc back to its containing fsl_edma_desc. */
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
477
/* Record that the channel hit an error: only updates the software
 * status; any hardware error clearing is up to the caller. */
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
}
482
483 void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
484 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
485 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
486 unsigned int slot, bool enable);
487 void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
488 int fsl_edma_terminate_all(struct dma_chan *chan);
489 int fsl_edma_pause(struct dma_chan *chan);
490 int fsl_edma_resume(struct dma_chan *chan);
491 int fsl_edma_slave_config(struct dma_chan *chan,
492 struct dma_slave_config *cfg);
493 enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
494 dma_cookie_t cookie, struct dma_tx_state *txstate);
495 struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
496 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
497 size_t period_len, enum dma_transfer_direction direction,
498 unsigned long flags);
499 struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
500 struct dma_chan *chan, struct scatterlist *sgl,
501 unsigned int sg_len, enum dma_transfer_direction direction,
502 unsigned long flags, void *context);
503 struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
504 struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
505 size_t len, unsigned long flags);
506 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
507 void fsl_edma_issue_pending(struct dma_chan *chan);
508 int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
509 void fsl_edma_free_chan_resources(struct dma_chan *chan);
510 void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
511 void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
512
513 #endif /* _FSL_EDMA_COMMON_H_ */
514