1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
4 // Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>
5 
6 #include <linux/cleanup.h>
7 #include <linux/clk.h>
8 #include <linux/dmapool.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_domain.h>
14 
15 #include "fsl-edma-common.h"
16 
17 #define EDMA_CR			0x00
18 #define EDMA_ES			0x04
19 #define EDMA_ERQ		0x0C
20 #define EDMA_EEI		0x14
21 #define EDMA_SERQ		0x1B
22 #define EDMA_CERQ		0x1A
23 #define EDMA_SEEI		0x19
24 #define EDMA_CEEI		0x18
25 #define EDMA_CINT		0x1F
26 #define EDMA_CERR		0x1E
27 #define EDMA_SSRT		0x1D
28 #define EDMA_CDNE		0x1C
29 #define EDMA_INTR		0x24
30 #define EDMA_ERR		0x2C
31 
32 #define EDMA64_ERQH		0x08
33 #define EDMA64_EEIH		0x10
34 #define EDMA64_SERQ		0x18
35 #define EDMA64_CERQ		0x19
36 #define EDMA64_SEEI		0x1a
37 #define EDMA64_CEEI		0x1b
38 #define EDMA64_CINT		0x1c
39 #define EDMA64_CERR		0x1d
40 #define EDMA64_SSRT		0x1e
41 #define EDMA64_CDNE		0x1f
42 #define EDMA64_INTH		0x20
43 #define EDMA64_INTL		0x24
44 #define EDMA64_ERRH		0x28
45 #define EDMA64_ERRL		0x2c
46 
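/*
 * Transfer-complete handler for one channel: under vchan.lock, complete the
 * current virtual descriptor (or run the cyclic callback), and if the channel
 * is now idle, start the next queued descriptor.
 */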
47 void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
48 {
49 	spin_lock(&fsl_chan->vchan.lock);
50 
51 	if (!fsl_chan->edesc) {
52 		/* terminate_all called before */
53 		spin_unlock(&fsl_chan->vchan.lock);
54 		return;
55 	}
56 
57 	if (!fsl_chan->edesc->iscyclic) {
58 		list_del(&fsl_chan->edesc->vdesc.node);
59 		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
60 		fsl_chan->edesc = NULL;
61 		fsl_chan->status = DMA_COMPLETE;
62 	} else {
63 		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
64 	}
65 
66 	if (!fsl_chan->edesc)
67 		fsl_edma_xfer_desc(fsl_chan);
68 
69 	spin_unlock(&fsl_chan->vchan.lock);
70 }
71 
72 static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
73 {
74 	u32 val, flags;
75 
76 	flags = fsl_edma_drvflags(fsl_chan);
77 	val = edma_readl_chreg(fsl_chan, ch_sbr);
78 	if (fsl_chan->is_rxchan)
79 		val |= EDMA_V3_CH_SBR_RD;
80 	else
81 		val |= EDMA_V3_CH_SBR_WR;
82 
83 	if (fsl_chan->is_remote)
84 		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
85 
86 	edma_writel_chreg(fsl_chan, val, ch_sbr);
87 
88 	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
89 		/*
90 		 * ch_mux: With the exception of 0, attempts to write a value
91 		 * already in use will be forced to 0.
92 		 */
93 		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
94 			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
95 	}
96 
97 	val = edma_readl_chreg(fsl_chan, ch_csr);
98 	val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
99 	edma_writel_chreg(fsl_chan, val, ch_csr);
100 }
101 
102 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
103 {
104 	struct edma_regs *regs = &fsl_chan->edma->regs;
105 	u32 ch = fsl_chan->vchan.chan.chan_id;
106 
107 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
108 		return fsl_edma3_enable_request(fsl_chan);
109 
110 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
111 		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
112 		edma_writeb(fsl_chan->edma, ch, regs->serq);
113 	} else {
114 		/* ColdFire is big endian, and accesses natively
115 		 * big endian I/O peripherals
116 		 */
117 		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
118 		iowrite8(ch, regs->serq);
119 	}
120 }
121 
122 static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
123 {
124 	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
125 	u32 flags;
126 
127 	flags = fsl_edma_drvflags(fsl_chan);
128 
129 	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
130 		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
131 
132 	val &= ~EDMA_V3_CH_CSR_ERQ;
133 	edma_writel_chreg(fsl_chan, val, ch_csr);
134 }
135 
136 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
137 {
138 	struct edma_regs *regs = &fsl_chan->edma->regs;
139 	u32 ch = fsl_chan->vchan.chan.chan_id;
140 
141 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
142 		return fsl_edma3_disable_request(fsl_chan);
143 
144 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
145 		edma_writeb(fsl_chan->edma, ch, regs->cerq);
146 		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
147 	} else {
148 		/* ColdFire is big endian, and accesses natively
149 		 * big endian I/O peripherals
150 		 */
151 		iowrite8(ch, regs->cerq);
152 		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
153 	}
154 }
155 
156 static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
157 			   u32 off, u32 slot, bool enable)
158 {
159 	u8 val8;
160 
161 	if (enable)
162 		val8 = EDMAMUX_CHCFG_ENBL | slot;
163 	else
164 		val8 = EDMAMUX_CHCFG_DIS;
165 
166 	iowrite8(val8, addr + off);
167 }
168 
169 static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
170 			    u32 off, u32 slot, bool enable)
171 {
172 	u32 val;
173 
174 	if (enable)
175 		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
176 	else
177 		val = EDMAMUX_CHCFG_DIS;
178 
179 	iowrite32(val, addr + off * 4);
180 }
181 
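/*
 * Route (or, when @enable is false, disconnect) this channel's DMAMUX slot.
 * Controllers without a DMAMUX (dmamux_nr == 0) are left untouched; the
 * MUX_SWAP quirk adjusts the per-channel register offset and CONFIG32 selects
 * 32-bit instead of 8-bit mux register accesses.
 */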
182 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
183 		       unsigned int slot, bool enable)
184 {
185 	u32 ch = fsl_chan->vchan.chan.chan_id;
186 	void __iomem *muxaddr;
187 	unsigned int chans_per_mux, ch_off;
188 	int endian_diff[4] = {3, 1, -1, -3};
189 	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
190 
191 	if (!dmamux_nr)
192 		return;
193 
194 	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
195 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
196 
197 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
198 		ch_off += endian_diff[ch_off % 4];
199 
200 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
201 	slot = EDMAMUX_CHCFG_SOURCE(slot);
202 
203 	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
204 		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
205 	else
206 		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
207 }
208 
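/*
 * Build the TCD ATTR value: the destination size code (log2 of the bus width)
 * lives in the low bits and the source size code in bits 8 and up; undefined
 * widths default to 4 bytes.
 */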
209 static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth src_addr_width,
210 					  enum dma_slave_buswidth dst_addr_width)
211 {
212 	u32 src_val, dst_val;
213 
214 	if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
215 		src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
216 	if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
217 		dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
218 
219 	src_val = ffs(src_addr_width) - 1;
220 	dst_val = ffs(dst_addr_width) - 1;
221 	return dst_val | (src_val << 8);
222 }
223 
224 void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
225 {
226 	struct fsl_edma_desc *fsl_desc;
227 	int i;
228 
229 	fsl_desc = to_fsl_edma_desc(vdesc);
230 	for (i = 0; i < fsl_desc->n_tcds; i++)
231 		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
232 			      fsl_desc->tcd[i].ptcd);
233 	kfree(fsl_desc);
234 }
235 
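/*
 * Terminate all transfers on the channel: disable the hardware request, drop
 * the active descriptor and free everything still queued on the virtual
 * channel; on power-domain capable controllers, re-allow runtime PM.
 */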
236 int fsl_edma_terminate_all(struct dma_chan *chan)
237 {
238 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
239 	unsigned long flags;
240 	LIST_HEAD(head);
241 
242 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
243 	fsl_edma_disable_request(fsl_chan);
244 	fsl_chan->edesc = NULL;
245 	fsl_chan->status = DMA_COMPLETE;
246 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
247 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
248 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
249 
250 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
251 		pm_runtime_allow(fsl_chan->pd_dev);
252 
253 	return 0;
254 }
255 
256 int fsl_edma_pause(struct dma_chan *chan)
257 {
258 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
259 	unsigned long flags;
260 
261 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
262 	if (fsl_chan->edesc) {
263 		fsl_edma_disable_request(fsl_chan);
264 		fsl_chan->status = DMA_PAUSED;
265 	}
266 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
267 	return 0;
268 }
269 
270 int fsl_edma_resume(struct dma_chan *chan)
271 {
272 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
273 	unsigned long flags;
274 
275 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
276 	if (fsl_chan->edesc) {
277 		fsl_edma_enable_request(fsl_chan);
278 		fsl_chan->status = DMA_IN_PROGRESS;
279 	}
280 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
281 	return 0;
282 }
283 
284 static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
285 {
286 	if (fsl_chan->dma_dir != DMA_NONE)
287 		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
288 				   fsl_chan->dma_dev_addr,
289 				   fsl_chan->dma_dev_size,
290 				   fsl_chan->dma_dir, 0);
291 	fsl_chan->dma_dir = DMA_NONE;
292 }
293 
294 static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
295 				    enum dma_transfer_direction dir)
296 {
297 	struct device *dev = fsl_chan->vchan.chan.device->dev;
298 	enum dma_data_direction dma_dir;
299 	phys_addr_t addr = 0;
300 	u32 size = 0;
301 
302 	switch (dir) {
303 	case DMA_MEM_TO_DEV:
304 		dma_dir = DMA_FROM_DEVICE;
305 		addr = fsl_chan->cfg.dst_addr;
306 		size = fsl_chan->cfg.dst_maxburst;
307 		break;
308 	case DMA_DEV_TO_MEM:
309 		dma_dir = DMA_TO_DEVICE;
310 		addr = fsl_chan->cfg.src_addr;
311 		size = fsl_chan->cfg.src_maxburst;
312 		break;
313 	default:
314 		dma_dir = DMA_NONE;
315 		break;
316 	}
317 
318 	/* Already mapped for this config? */
319 	if (fsl_chan->dma_dir == dma_dir)
320 		return true;
321 
322 	fsl_edma_unprep_slave_dma(fsl_chan);
323 
324 	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
325 	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
326 		return false;
327 	fsl_chan->dma_dev_size = size;
328 	fsl_chan->dma_dir = dma_dir;
329 
330 	return true;
331 }
332 
333 int fsl_edma_slave_config(struct dma_chan *chan,
334 				 struct dma_slave_config *cfg)
335 {
336 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
337 
338 	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
339 	fsl_edma_unprep_slave_dma(fsl_chan);
340 
341 	return 0;
342 }
343 
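/*
 * Compute the remaining byte count for a descriptor: sum the size of every
 * TCD, then, for an in-flight transfer, sample the current source/destination
 * address (retrying the non-atomic 64-bit read) to find the TCD being
 * processed and subtract what has already completed.
 */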
344 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
345 		struct virt_dma_desc *vdesc, bool in_progress)
346 {
347 	struct fsl_edma_desc *edesc = fsl_chan->edesc;
348 	enum dma_transfer_direction dir = edesc->dirn;
349 	dma_addr_t cur_addr, dma_addr, old_addr;
350 	size_t len, size;
351 	u32 nbytes = 0;
352 	int i;
353 
354 	/* calculate the total size in this desc */
355 	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
356 		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
357 		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
358 			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
359 		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
360 	}
361 
362 	if (!in_progress)
363 		return len;
364 
365 	/* A 64-bit read is not atomic; retry if the upper 32 bits changed between reads */
366 	do {
367 		if (dir == DMA_MEM_TO_DEV) {
368 			old_addr = edma_read_tcdreg(fsl_chan, saddr);
369 			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
370 		} else {
371 			old_addr = edma_read_tcdreg(fsl_chan, daddr);
372 			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
373 		}
374 	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));
375 
376 	/* find the TCD the transfer has reached and calculate the residue */
377 	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
378 		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
379 		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
380 			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
381 
382 		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
383 
384 		if (dir == DMA_MEM_TO_DEV)
385 			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
386 		else
387 			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
388 
389 		len -= size;
390 		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
391 			len += dma_addr + size - cur_addr;
392 			break;
393 		}
394 	}
395 
396 	return len;
397 }
398 
399 enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
400 		dma_cookie_t cookie, struct dma_tx_state *txstate)
401 {
402 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
403 	struct virt_dma_desc *vdesc;
404 	enum dma_status status;
405 	unsigned long flags;
406 
407 	status = dma_cookie_status(chan, cookie, txstate);
408 	if (status == DMA_COMPLETE)
409 		return status;
410 
411 	if (!txstate)
412 		return fsl_chan->status;
413 
414 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
415 	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
416 	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
417 		txstate->residue =
418 			fsl_edma_desc_residue(fsl_chan, vdesc, true);
419 	else if (vdesc)
420 		txstate->residue =
421 			fsl_edma_desc_residue(fsl_chan, vdesc, false);
422 	else
423 		txstate->residue = 0;
424 
425 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
426 
427 	return fsl_chan->status;
428 }
429 
430 static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
431 {
432 	u16 csr = 0;
433 
434 	/*
435 	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
436 	 * endian format. However, the TCD registers must be loaded in big-
437 	 * or little-endian order according to the eDMA engine's endianness
438 	 * model; the edma_write helpers take care of the byte swapping.
439 	 */
440 	edma_write_tcdreg(fsl_chan, 0, csr);
441 
442 	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
443 	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);
444 
445 	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
446 	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);
447 
448 	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
449 	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);
450 
451 	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
452 	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
453 	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);
454 
455 	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);
456 
457 	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
458 
459 	if (fsl_chan->is_sw) {
460 		csr |= EDMA_TCD_CSR_START;
461 		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
462 	}
463 
464 	/*
465 	 * The CHn_CSR[DONE] bit must be cleared before enabling TCDn_CSR[ESG]
466 	 * on eDMAv3; eDMAv4 has no such requirement.
467 	 * Changing MLINK requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
468 	 */
469 	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
470 		(csr & EDMA_TCD_CSR_E_SG)) ||
471 	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
472 		(csr & EDMA_TCD_CSR_E_LINK)))
473 		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
474 
475 
476 	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
477 }
478 
479 static inline
480 void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
481 		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
482 		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
483 		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
484 		       bool disable_req, bool enable_sg)
485 {
486 	struct dma_slave_config *cfg = &fsl_chan->cfg;
487 	u32 burst = 0;
488 	u16 csr = 0;
489 
490 	/*
491 	 * eDMA hardware SGs require the TCDs to be stored in little
492 	 * endian format irrespective of the register endian model.
493 	 * So we put the value in little endian in memory, waiting
494 	 * for fsl_edma_set_tcd_regs doing the swap.
495 	 */
496 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
497 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);
498 
499 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);
500 
501 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
502 
503 	/*
504 	 * If either multi_fifo or a port window size is configured, we use the
505 	 * minor loop offset: bits 29-10 hold the address offset, while bits
506 	 * 9-0 tell the DMA how much data to read from addr.
507 	 * If we have neither of those, we use a major loop reading nbytes
508 	 * (29 bits) from addr.
509 	 */
510 	if (cfg->direction == DMA_MEM_TO_DEV) {
511 		if (fsl_chan->is_multi_fifo)
512 			burst = cfg->dst_maxburst * 4;
513 		if (cfg->dst_port_window_size)
514 			burst = cfg->dst_port_window_size * cfg->dst_addr_width;
515 		if (burst) {
516 			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
517 			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
518 			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
519 		}
520 	} else {
521 		if (fsl_chan->is_multi_fifo)
522 			burst = cfg->src_maxburst * 4;
523 		if (cfg->src_port_window_size)
524 			burst = cfg->src_port_window_size * cfg->src_addr_width;
525 		if (burst) {
526 			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
527 			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
528 			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
529 		}
530 	}
531 
532 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
533 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);
534 
535 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
536 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
537 
538 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
539 
540 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
541 
542 	if (major_int)
543 		csr |= EDMA_TCD_CSR_INT_MAJOR;
544 
545 	if (disable_req)
546 		csr |= EDMA_TCD_CSR_D_REQ;
547 
548 	if (enable_sg)
549 		csr |= EDMA_TCD_CSR_E_SG;
550 
551 	if (fsl_chan->is_rxchan)
552 		csr |= EDMA_TCD_CSR_ACTIVE;
553 
554 	if (fsl_chan->is_sw)
555 		csr |= EDMA_TCD_CSR_START;
556 
557 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
558 
559 	trace_edma_fill_tcd(fsl_chan, tcd);
560 }
561 
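/*
 * Allocate a descriptor with @sg_len hardware TCDs from the channel's DMA
 * pool; on failure, free any TCDs already allocated and return NULL.
 */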
562 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
563 		int sg_len)
564 {
565 	struct fsl_edma_desc *fsl_desc;
566 	int i;
567 
568 	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
569 	if (!fsl_desc)
570 		return NULL;
571 
572 	fsl_desc->echan = fsl_chan;
573 	fsl_desc->n_tcds = sg_len;
574 	for (i = 0; i < sg_len; i++) {
575 		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
576 					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
577 		if (!fsl_desc->tcd[i].vtcd)
578 			goto err;
579 	}
580 	return fsl_desc;
581 
582 err:
583 	while (--i >= 0)
584 		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
585 				fsl_desc->tcd[i].ptcd);
586 	kfree(fsl_desc);
587 	return NULL;
588 }
589 
590 struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
591 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
592 		size_t period_len, enum dma_transfer_direction direction,
593 		unsigned long flags)
594 {
595 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
596 	struct fsl_edma_desc *fsl_desc;
597 	dma_addr_t dma_buf_next;
598 	bool major_int = true;
599 	int sg_len, i;
600 	dma_addr_t src_addr, dst_addr, last_sg;
601 	u16 soff, doff, iter;
602 	u32 nbytes;
603 
604 	if (!is_slave_direction(direction))
605 		return NULL;
606 
607 	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
608 		return NULL;
609 
610 	sg_len = buf_len / period_len;
611 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
612 	if (!fsl_desc)
613 		return NULL;
614 	fsl_desc->iscyclic = true;
615 	fsl_desc->dirn = direction;
616 
617 	dma_buf_next = dma_addr;
618 	if (direction == DMA_MEM_TO_DEV) {
619 		if (!fsl_chan->cfg.src_addr_width)
620 			fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
621 		fsl_chan->attr =
622 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
623 					      fsl_chan->cfg.dst_addr_width);
624 		nbytes = fsl_chan->cfg.dst_addr_width *
625 			fsl_chan->cfg.dst_maxburst;
626 	} else {
627 		if (!fsl_chan->cfg.dst_addr_width)
628 			fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
629 		fsl_chan->attr =
630 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
631 					      fsl_chan->cfg.dst_addr_width);
632 		nbytes = fsl_chan->cfg.src_addr_width *
633 			fsl_chan->cfg.src_maxburst;
634 	}
635 
636 	iter = period_len / nbytes;
637 
638 	for (i = 0; i < sg_len; i++) {
639 		if (dma_buf_next >= dma_addr + buf_len)
640 			dma_buf_next = dma_addr;
641 
642 		/* get next sg's physical address */
643 		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
644 
645 		if (direction == DMA_MEM_TO_DEV) {
646 			src_addr = dma_buf_next;
647 			dst_addr = fsl_chan->dma_dev_addr;
648 			soff = fsl_chan->cfg.dst_addr_width;
649 			doff = fsl_chan->is_multi_fifo ? 4 : 0;
650 			if (fsl_chan->cfg.dst_port_window_size)
651 				doff = fsl_chan->cfg.dst_addr_width;
652 		} else if (direction == DMA_DEV_TO_MEM) {
653 			src_addr = fsl_chan->dma_dev_addr;
654 			dst_addr = dma_buf_next;
655 			soff = fsl_chan->is_multi_fifo ? 4 : 0;
656 			doff = fsl_chan->cfg.src_addr_width;
657 			if (fsl_chan->cfg.src_port_window_size)
658 				soff = fsl_chan->cfg.src_addr_width;
659 		} else {
660 			/* DMA_DEV_TO_DEV */
661 			src_addr = fsl_chan->cfg.src_addr;
662 			dst_addr = fsl_chan->cfg.dst_addr;
663 			soff = doff = 0;
664 			major_int = false;
665 		}
666 
667 		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
668 				  fsl_chan->attr, soff, nbytes, 0, iter,
669 				  iter, doff, last_sg, major_int, false, true);
670 		dma_buf_next += period_len;
671 	}
672 
673 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
674 }
675 
676 struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
677 		struct dma_chan *chan, struct scatterlist *sgl,
678 		unsigned int sg_len, enum dma_transfer_direction direction,
679 		unsigned long flags, void *context)
680 {
681 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
682 	struct fsl_edma_desc *fsl_desc;
683 	struct scatterlist *sg;
684 	dma_addr_t src_addr, dst_addr, last_sg;
685 	u16 soff, doff, iter;
686 	u32 nbytes;
687 	int i;
688 
689 	if (!is_slave_direction(direction))
690 		return NULL;
691 
692 	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
693 		return NULL;
694 
695 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
696 	if (!fsl_desc)
697 		return NULL;
698 	fsl_desc->iscyclic = false;
699 	fsl_desc->dirn = direction;
700 
701 	if (direction == DMA_MEM_TO_DEV) {
702 		if (!fsl_chan->cfg.src_addr_width)
703 			fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
704 		fsl_chan->attr =
705 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
706 					      fsl_chan->cfg.dst_addr_width);
707 		nbytes = fsl_chan->cfg.dst_addr_width *
708 			fsl_chan->cfg.dst_maxburst;
709 	} else {
710 		if (!fsl_chan->cfg.dst_addr_width)
711 			fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
712 		fsl_chan->attr =
713 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
714 					      fsl_chan->cfg.dst_addr_width);
715 		nbytes = fsl_chan->cfg.src_addr_width *
716 			fsl_chan->cfg.src_maxburst;
717 	}
718 
719 	for_each_sg(sgl, sg, sg_len, i) {
720 		if (direction == DMA_MEM_TO_DEV) {
721 			src_addr = sg_dma_address(sg);
722 			dst_addr = fsl_chan->dma_dev_addr;
723 			soff = fsl_chan->cfg.dst_addr_width;
724 			doff = 0;
725 		} else if (direction == DMA_DEV_TO_MEM) {
726 			src_addr = fsl_chan->dma_dev_addr;
727 			dst_addr = sg_dma_address(sg);
728 			soff = 0;
729 			doff = fsl_chan->cfg.src_addr_width;
730 		} else {
731 			/* DMA_DEV_TO_DEV */
732 			src_addr = fsl_chan->cfg.src_addr;
733 			dst_addr = fsl_chan->cfg.dst_addr;
734 			soff = 0;
735 			doff = 0;
736 		}
737 
738 		/*
739 		 * If sg_dma_len is not a multiple of the burst length, choose a
740 		 * suitable burst length so that the whole transfer length is a
741 		 * multiple of the minor loop (burst length).
742 		 */
743 		if (sg_dma_len(sg) % nbytes) {
744 			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
745 			u32 burst = (direction == DMA_DEV_TO_MEM) ?
746 						fsl_chan->cfg.src_maxburst :
747 						fsl_chan->cfg.dst_maxburst;
748 			int j;
749 
750 			for (j = burst; j > 1; j--) {
751 				if (!(sg_dma_len(sg) % (j * width))) {
752 					nbytes = j * width;
753 					break;
754 				}
755 			}
756 			/* Set burst size as 1 if there's no suitable one */
757 			if (j == 1)
758 				nbytes = width;
759 		}
760 		iter = sg_dma_len(sg) / nbytes;
761 		if (i < sg_len - 1) {
762 			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
763 			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
764 					  dst_addr, fsl_chan->attr, soff,
765 					  nbytes, 0, iter, iter, doff, last_sg,
766 					  false, false, true);
767 		} else {
768 			last_sg = 0;
769 			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
770 					  dst_addr, fsl_chan->attr, soff,
771 					  nbytes, 0, iter, iter, doff, last_sg,
772 					  true, true, false);
773 		}
774 	}
775 
776 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
777 }
778 
779 struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
780 						     dma_addr_t dma_dst, dma_addr_t dma_src,
781 						     size_t len, unsigned long flags)
782 {
783 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
784 	struct fsl_edma_desc *fsl_desc;
785 	u32 src_bus_width, dst_bus_width;
786 
787 	src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
788 	dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
789 
790 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
791 	if (!fsl_desc)
792 		return NULL;
793 	fsl_desc->iscyclic = false;
794 
795 	fsl_chan->is_sw = true;
796 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
797 		fsl_chan->is_remote = true;
798 
799 	/* copy_align and max_seg_size guarantee the transfer fits in a single TCD */
800 	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
801 			fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
802 			src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
803 			true, false);
804 
805 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
806 }
807 
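/*
 * Start the next queued virtual descriptor, if any: load its first TCD into
 * the channel registers and enable the hardware request. Must be called with
 * vchan.lock held.
 */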
808 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
809 {
810 	struct virt_dma_desc *vdesc;
811 
812 	lockdep_assert_held(&fsl_chan->vchan.lock);
813 
814 	vdesc = vchan_next_desc(&fsl_chan->vchan);
815 	if (!vdesc)
816 		return;
817 	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
818 	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
819 	fsl_edma_enable_request(fsl_chan);
820 	fsl_chan->status = DMA_IN_PROGRESS;
821 }
822 
823 void fsl_edma_issue_pending(struct dma_chan *chan)
824 {
825 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
826 	unsigned long flags;
827 
828 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
829 
830 	if (unlikely(fsl_chan->pm_state != RUNNING)) {
831 		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
832 		/* cannot submit due to suspend */
833 		return;
834 	}
835 
836 	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
837 		fsl_edma_xfer_desc(fsl_chan);
838 
839 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
840 }
841 
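/*
 * Per-channel resource allocation: enable the optional channel clock, create
 * a DMA pool sized for the 32-bit or 64-bit TCD layout, and request the
 * transfer and (if present) error interrupts.
 */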
842 int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
843 {
844 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
845 	int ret = 0;
846 
847 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
848 		clk_prepare_enable(fsl_chan->clk);
849 
850 	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
851 				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
852 				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
853 				32, 0);
854 
855 	if (fsl_chan->txirq)
856 		ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
857 				 fsl_chan->chan_name, fsl_chan);
858 
859 	if (ret)
860 		goto err_txirq;
861 
862 	if (fsl_chan->errirq > 0)
863 		ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
864 				  fsl_chan->errirq_name, fsl_chan);
865 
866 	if (ret)
867 		goto err_errirq;
868 
869 	return 0;
870 
871 err_errirq:
872 	if (fsl_chan->txirq)
873 		free_irq(fsl_chan->txirq, fsl_chan);
874 err_txirq:
875 	dma_pool_destroy(fsl_chan->tcd_pool);
876 
877 	return ret;
878 }
879 
880 void fsl_edma_free_chan_resources(struct dma_chan *chan)
881 {
882 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
883 	struct fsl_edma_engine *edma = fsl_chan->edma;
884 	unsigned long flags;
885 	LIST_HEAD(head);
886 
887 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
888 	fsl_edma_disable_request(fsl_chan);
889 	if (edma->drvdata->dmamuxs)
890 		fsl_edma_chan_mux(fsl_chan, 0, false);
891 	fsl_chan->edesc = NULL;
892 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
893 	fsl_edma_unprep_slave_dma(fsl_chan);
894 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
895 
896 	if (fsl_chan->txirq)
897 		free_irq(fsl_chan->txirq, fsl_chan);
898 	if (fsl_chan->errirq)
899 		free_irq(fsl_chan->errirq, fsl_chan);
900 
901 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
902 	dma_pool_destroy(fsl_chan->tcd_pool);
903 	fsl_chan->tcd_pool = NULL;
904 	fsl_chan->is_sw = false;
905 	fsl_chan->srcid = 0;
906 	fsl_chan->is_remote = false;
907 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
908 		clk_disable_unprepare(fsl_chan->clk);
909 }
910 
911 void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
912 {
913 	struct fsl_edma_chan *chan, *_chan;
914 
915 	list_for_each_entry_safe(chan, _chan,
916 				&dmadev->channels, vchan.chan.device_node) {
917 		list_del(&chan->vchan.chan.device_node);
918 		tasklet_kill(&chan->vchan.task);
919 	}
920 }
921 
922 /*
923  * On the 32-channel Vybrid/mpc577x eDMA version, the register offsets
924  * differ from those of the 64-channel ColdFire mcf5441x eDMA.
925  *
926  * This function sets up the register offsets for the declared version, so
927  * it must be called from xxx_edma_probe() just after the edma "version"
928  * and "membase" have been set appropriately.
929  */
930 void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
931 {
932 	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
933 
934 	edma->regs.cr = edma->membase + EDMA_CR;
935 	edma->regs.es = edma->membase + EDMA_ES;
936 	edma->regs.erql = edma->membase + EDMA_ERQ;
937 	edma->regs.eeil = edma->membase + EDMA_EEI;
938 
939 	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
940 	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
941 	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
942 	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
943 	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
944 	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
945 	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
946 	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
947 	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
948 	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
949 
950 	if (is64) {
951 		edma->regs.erqh = edma->membase + EDMA64_ERQH;
952 		edma->regs.eeih = edma->membase + EDMA64_EEIH;
953 		edma->regs.errh = edma->membase + EDMA64_ERRH;
954 		edma->regs.inth = edma->membase + EDMA64_INTH;
955 	}
956 }
957 
958 MODULE_LICENSE("GPL v2");
959