xref: /linux/drivers/dma/xilinx/xdma.c (revision 5e2cb28dd7e182dfa641550dfa225913509ad45d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * DMA driver for Xilinx DMA/Bridge Subsystem
4  *
5  * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
6  * Copyright (C) 2022, Advanced Micro Devices, Inc.
7  */
8 
9 /*
10  * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
11  * between Host memory and the DMA subsystem. It does this by operating on
12  * 'descriptors' that contain information about the source, destination and
13  * amount of data to transfer. These direct memory transfers can be both in
14  * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
15  * configured to have a single AXI4 Master interface shared by all channels
16  * or one AXI4-Stream interface for each channel enabled. Memory transfers are
17  * specified on a per-channel basis in descriptor linked lists, which the DMA
18  * fetches from host memory and processes. Events such as descriptor completion
19  * and errors are signaled using interrupts. The core also provides up to 16
20  * user interrupt wires that generate interrupts to the host.
21  */
22 
23 #include <linux/mod_devicetable.h>
24 #include <linux/bitfield.h>
25 #include <linux/dmapool.h>
26 #include <linux/regmap.h>
27 #include <linux/dmaengine.h>
28 #include <linux/dma/amd_xdma.h>
29 #include <linux/platform_device.h>
30 #include <linux/platform_data/amd_xdma.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/pci.h>
33 #include "../virt-dma.h"
34 #include "xdma-regs.h"
35 
/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,		/* 32-bit register offsets */
	.val_bits = 32,		/* 32-bit register values */
	.reg_stride = 4,	/* registers are word-aligned */
	.max_register = XDMA_REG_SPACE_LEN,
};
43 
/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 *
 * One block is a single dma_pool allocation holding XDMA_DESC_ADJACENT
 * contiguous hardware descriptors (see xdma_alloc_desc()).
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};
53 
/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure (struct xdma_device)
 * @base: Offset of this channel's registers within the register space
 * @desc_pool: Descriptor pool (one hardware descriptor block per entry)
 * @busy: Busy flag of the channel; protected by the vchan lock
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel, set via xdma_device_config()
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};
75 
/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors, updated from the
 *                      channel interrupt handler
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
	bool				cyclic;
	u32				periods;
	u32				period_size;
};
103 
/* Bits for xdma_device.status, tracking how far probe progressed */
#define XDMA_DEV_STATUS_REG_DMA		BIT(0)	/* dmaengine device registered */
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)	/* IRQ handlers installed */

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQ assigned to device
 * @status: Initialization status (XDMA_DEV_STATUS_* flags)
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};
132 
/* Log an error against the underlying platform device */
#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
/* Total channel count (H2C + C2H); evaluates _xd only once */
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
138 
/* Get the last desc in a desc block (blocks hold XDMA_DESC_ADJACENT descs) */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}
144 
/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 *
 * Chains the descriptor blocks of a scatter-gather transfer: the last
 * hardware descriptor of each block is pointed at the start of the next
 * block, and control words are patched with the adjacent-descriptor
 * count and the end-of-transfer marker.
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		/*
		 * At an XDMA_DESC_BLOCK_NUM boundary, terminate the chain;
		 * xdma_xfer_start() restarts the engine at the next set of
		 * blocks (see the restart path in xdma_channel_isr()).
		 */
		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		/* link this block's last descriptor to the next block */
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		/*
		 * The second-to-last block must advertise only the number of
		 * descriptors actually used in the (partial) final block.
		 */
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	/* mark the final in-use descriptor as the end of the transfer */
	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
182 
/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 *
 * Links all descriptors of a cyclic transfer into a ring within the
 * first (and only) descriptor block: each descriptor points to the next
 * one, and the last points back to the start of the block.
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	struct xdma_hw_desc *desc;
	int i;

	block = sw_desc->desc_blocks;
	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
	}
	/* close the ring: last descriptor points back to the first */
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	desc->next_desc = cpu_to_le64(block->dma_addr);
}
201 
/* Convert a generic dma_chan to the driver-private channel structure */
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}
206 
/* Convert a virt-dma descriptor to the driver-private descriptor */
static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}
211 
212 /**
213  * xdma_channel_init - Initialize DMA channel registers
214  * @chan: DMA channel pointer
215  */
216 static int xdma_channel_init(struct xdma_chan *chan)
217 {
218 	struct xdma_device *xdev = chan->xdev_hdl;
219 	int ret;
220 
221 	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
222 			   CHAN_CTRL_NON_INCR_ADDR);
223 	if (ret)
224 		return ret;
225 
226 	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
227 			   CHAN_IM_ALL);
228 	if (ret)
229 		return ret;
230 
231 	return 0;
232 }
233 
/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 *
 * Returns every descriptor block to the channel's DMA pool, then frees
 * the block array and the software descriptor itself. Safe on a
 * partially-allocated descriptor: xdma_alloc_desc() fills blocks in
 * order, so the first NULL virt_addr marks the end of valid entries.
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	/* kfree(NULL) is a no-op, so a failed kcalloc is handled too */
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}
254 
/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 *
 * Allocates the software descriptor plus enough hardware descriptor
 * blocks to hold @desc_num descriptors, pre-sets every control word and
 * links the blocks for either SG or cyclic operation. GFP_NOWAIT is
 * used because prep callbacks may run in atomic context.
 *
 * Return: the new descriptor, or NULL on allocation failure.
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	u32 control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	/* each pool block holds XDMA_DESC_ADJACENT hardware descriptors */
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		/* pre-set the control word of every descriptor in the block */
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	/* xdma_free_desc() copes with partially populated blocks */
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}
313 
/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 *
 * Programs the SGDMA engine with the first not-yet-completed descriptor
 * block of the next issued descriptor and starts the engine. The vchan
 * lock must be held by the caller.
 *
 * Return: 0 on success, -EINVAL if there is nothing to start or the
 * request direction mismatches, or a negative regmap error code.
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * check if there is not any submitted descriptor or channel is busy.
	 * vchan lock should be held where this function is called.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	/* tell the engine how many additional adjacent descriptors follow */
	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	return 0;
}
376 
/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 *
 * Scans the register space for channels of the given direction in two
 * passes: first counting the channels that report the expected target
 * identifier, then allocating and initializing a driver structure for
 * each of them and registering it with the virt-dma framework.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if it is available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "does not probe any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	/* second pass: initialize each detected channel */
	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		/* guard against the hardware reporting more than counted */
		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}
462 
463 /**
464  * xdma_issue_pending - Issue pending transactions
465  * @chan: DMA channel pointer
466  */
467 static void xdma_issue_pending(struct dma_chan *chan)
468 {
469 	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
473 	if (vchan_issue_pending(&xdma_chan->vchan))
474 		xdma_xfer_start(xdma_chan);
475 	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
476 }
477 
/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Splits each scatterlist entry into hardware descriptors of at most
 * XDMA_DESC_BLEN_MAX bytes. The device-side address comes from the
 * slave config (set via xdma_device_config()) and advances linearly
 * across the whole transfer.
 *
 * Return: the prepared descriptor, or NULL on failure.
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

	/* count the hardware descriptors needed for the whole list */
	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

	/* pick which side is the host buffer and which is the device */
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	/* desc_num is reused as a 1-based running descriptor index */
	desc_num = 1;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			/* advance, crossing into the next block when full */
			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}
559 
560 /**
561  * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
562  * @chan: DMA channel pointer
563  * @address: Device DMA address to access
564  * @size: Total length to transfer
565  * @period_size: Period size to use for each transfer
566  * @dir: Transfer direction
567  * @flags: Transfer ack flags
568  */
569 static struct dma_async_tx_descriptor *
570 xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
571 		     size_t size, size_t period_size,
572 		     enum dma_transfer_direction dir,
573 		     unsigned long flags)
574 {
575 	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
576 	struct xdma_device *xdev = xdma_chan->xdev_hdl;
577 	unsigned int periods = size / period_size;
578 	struct dma_async_tx_descriptor *tx_desc;
579 	struct xdma_desc_block *dblk;
580 	struct xdma_hw_desc *desc;
581 	struct xdma_desc *sw_desc;
582 	unsigned int i;
583 
584 	/*
585 	 * Simplify the whole logic by preventing an abnormally high number of
586 	 * periods and periods size.
587 	 */
588 	if (period_size > XDMA_DESC_BLEN_MAX) {
589 		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
590 		return NULL;
591 	}
592 
593 	if (periods > XDMA_DESC_ADJACENT) {
594 		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
595 		return NULL;
596 	}
597 
598 	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
599 	if (!sw_desc)
600 		return NULL;
601 
602 	sw_desc->periods = periods;
603 	sw_desc->period_size = period_size;
604 	sw_desc->dir = dir;
605 
606 	dblk = sw_desc->desc_blocks;
607 	desc = dblk->virt_addr;
608 
609 	/* fill hardware descriptor */
610 	for (i = 0; i < periods; i++) {
611 		desc->bytes = cpu_to_le32(period_size);
612 		if (dir == DMA_MEM_TO_DEV) {
613 			desc->src_addr = cpu_to_le64(address + i * period_size);
614 			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
615 		} else {
616 			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
617 			desc->dst_addr = cpu_to_le64(address + i * period_size);
618 		}
619 
620 		desc++;
621 	}
622 
623 	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
624 	if (!tx_desc)
625 		goto failed;
626 
627 	return tx_desc;
628 
629 failed:
630 	xdma_free_desc(&sw_desc->vdesc);
631 
632 	return NULL;
633 }
634 
635 /**
636  * xdma_device_config - Configure the DMA channel
637  * @chan: DMA channel
638  * @cfg: channel configuration
639  */
640 static int xdma_device_config(struct dma_chan *chan,
641 			      struct dma_slave_config *cfg)
642 {
643 	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
644 
645 	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
646 
647 	return 0;
648 }
649 
/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 *
 * Releases outstanding virt-dma descriptors, then destroys the channel's
 * descriptor pool. The pool pointer is cleared so a later
 * xdma_alloc_chan_resources() starts from a clean state.
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}
662 
/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 *
 * Creates the channel's descriptor pool. The pool is created against
 * the ancestor PCI device, since that is the device performing the DMA.
 *
 * Return: 0 on success, -EINVAL if no PCI ancestor exists, or -ENOMEM.
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	/* walk up from the platform device to the PCI device doing DMA */
	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
					       dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, 0);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}
690 
/**
 * xdma_tx_status - check the status of a DMA transaction
 * @chan: DMA channel
 * @cookie: Cookie identifying the transaction
 * @state: Output state, filled with the residue for cyclic transfers
 *
 * For cyclic transfers still in flight, computes the residue from the
 * number of completed periods; other transfers only get the generic
 * cookie status.
 */
static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (vd)
		desc = to_xdma_desc(vd);
	/* residue is only reported for cyclic transfers */
	if (!desc || !desc->cyclic) {
		spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
		return ret;
	}

	/* bytes left until the ring wraps back to period 0 */
	period_idx = desc->completed_desc_num % desc->periods;
	residue = (desc->periods - period_idx) * desc->period_size;

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	dma_set_residue(state, residue);

	return ret;
}
725 
/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 *
 * Reads the count of newly completed descriptors and either fires the
 * cyclic callback, completes the request, or restarts the engine on the
 * next set of descriptor blocks of a large SG transfer.
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	xchan->busy = false;
	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	desc->completed_desc_num += complete_desc_num;

	if (desc->cyclic) {
		ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
				  &st);
		if (ret)
			goto out;

		/* write back to acknowledge the reported status bits */
		regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);

		vchan_cyclic_callback(vd);
		goto out;
	}

	/*
	 * if all data blocks are transferred, remove and complete the request
	 */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

	/*
	 * only restart if the engine stopped exactly at a full block-set
	 * boundary; anything else is unexpected and left alone
	 */
	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data (SG only) */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}
791 
/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 *
 * Masks all channel interrupts in hardware, then frees every channel's
 * IRQ handler.
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}
810 
/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 *
 * Writes consecutive vector numbers into the hardware vector table,
 * packing four 8-bit-shifted entries into each 32-bit register.
 *
 * Return: 0 on success, a negative regmap error code otherwise.
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			/* last register may be only partially filled */
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}
845 
/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 *
 * Requests one IRQ per channel (H2C first, then C2H), programs the
 * hardware vector tables for channel and user interrupts, and enables
 * all channel interrupts. On failure, already-requested IRQs are freed
 * in reverse order.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	/* unwind only the IRQs requested so far, newest first */
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}
924 
/**
 * xdma_filter_fn - Filter channels by requested transfer direction
 * @chan: DMA channel to test
 * @param: Pointer to struct xdma_chan_info describing the request
 *
 * Used through the dmaengine filter map so clients are matched to a
 * channel of the direction they asked for.
 */
static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}
932 
/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 *
 * Masks the user-logic interrupt matching @irq_num. The index must fall
 * after the channel IRQs and within this device's IRQ range.
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	/* user IRQs are allocated after the channel IRQs */
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);
953 
954 /**
955  * xdma_enable_user_irq - Enable user logic interrupt
956  * @pdev: Pointer to the platform_device structure
957  * @irq_num: System IRQ number
958  */
959 int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
960 {
961 	struct xdma_device *xdev = platform_get_drvdata(pdev);
962 	u32 index;
963 	int ret;
964 
965 	index = irq_num - xdev->irq_start;
966 	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
967 		xdma_err(xdev, "invalid user irq number");
968 		return -EINVAL;
969 	}
970 	index -= XDMA_CHAN_NUM(xdev);
971 
972 	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
973 	if (ret)
974 		return ret;
975 
976 	return 0;
977 }
978 EXPORT_SYMBOL(xdma_enable_user_irq);
979 
/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * User IRQs are allocated directly after the channel IRQs within the
 * device's IRQ range.
 *
 * Return: The system IRQ number allocated for the given wire index, or
 * -EINVAL if the index is out of range.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
999 
/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Tears down only what probe completed, using the status flags; also
 * called from the probe error path, where initialization may have
 * stopped part-way.
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}
1014 
1015 /**
1016  * xdma_probe - Driver probe function
1017  * @pdev: Pointer to the platform_device structure
1018  */
1019 static int xdma_probe(struct platform_device *pdev)
1020 {
1021 	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
1022 	struct xdma_device *xdev;
1023 	void __iomem *reg_base;
1024 	struct resource *res;
1025 	int ret = -ENODEV;
1026 
1027 	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
1028 		dev_err(&pdev->dev, "invalid max dma channels %d",
1029 			pdata->max_dma_channels);
1030 		return -EINVAL;
1031 	}
1032 
1033 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1034 	if (!xdev)
1035 		return -ENOMEM;
1036 
1037 	platform_set_drvdata(pdev, xdev);
1038 	xdev->pdev = pdev;
1039 
1040 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1041 	if (!res) {
1042 		xdma_err(xdev, "failed to get irq resource");
1043 		goto failed;
1044 	}
1045 	xdev->irq_start = res->start;
1046 	xdev->irq_num = resource_size(res);
1047 
1048 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1049 	if (!res) {
1050 		xdma_err(xdev, "failed to get io resource");
1051 		goto failed;
1052 	}
1053 
1054 	reg_base = devm_ioremap_resource(&pdev->dev, res);
1055 	if (IS_ERR(reg_base)) {
1056 		xdma_err(xdev, "ioremap failed");
1057 		goto failed;
1058 	}
1059 
1060 	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
1061 					   &xdma_regmap_config);
1062 	if (!xdev->rmap) {
1063 		xdma_err(xdev, "config regmap failed: %d", ret);
1064 		goto failed;
1065 	}
1066 	INIT_LIST_HEAD(&xdev->dma_dev.channels);
1067 
1068 	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
1069 	if (ret) {
1070 		xdma_err(xdev, "config H2C channels failed: %d", ret);
1071 		goto failed;
1072 	}
1073 
1074 	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
1075 	if (ret) {
1076 		xdma_err(xdev, "config C2H channels failed: %d", ret);
1077 		goto failed;
1078 	}
1079 
1080 	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
1081 	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
1082 	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
1083 
1084 	xdev->dma_dev.dev = &pdev->dev;
1085 	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1086 	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
1087 	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
1088 	xdev->dma_dev.device_tx_status = xdma_tx_status;
1089 	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
1090 	xdev->dma_dev.device_config = xdma_device_config;
1091 	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
1092 	xdev->dma_dev.filter.map = pdata->device_map;
1093 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
1094 	xdev->dma_dev.filter.fn = xdma_filter_fn;
1095 	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
1096 
1097 	ret = dma_async_device_register(&xdev->dma_dev);
1098 	if (ret) {
1099 		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
1100 		goto failed;
1101 	}
1102 	xdev->status |= XDMA_DEV_STATUS_REG_DMA;
1103 
1104 	ret = xdma_irq_init(xdev);
1105 	if (ret) {
1106 		xdma_err(xdev, "failed to init msix: %d", ret);
1107 		goto failed;
1108 	}
1109 	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
1110 
1111 	return 0;
1112 
1113 failed:
1114 	xdma_remove(pdev);
1115 
1116 	return ret;
1117 }
1118 
/* Matches the "xdma" platform device created by the PCIe parent driver */
static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};

/* Platform driver glue */
static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove_new	= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");
1137 MODULE_LICENSE("GPL");
1138