xref: /linux/drivers/dma/mv_xor_v2.c (revision f2bf88c4afc8c5ab92b40af24819933e57d0968c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2015-2016 Marvell International Ltd.
4 
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_irq.h>
15 #include <linux/platform_device.h>
16 #include <linux/spinlock.h>
17 
18 #include "dmaengine.h"
19 
20 /* DMA Engine Registers */
21 #define MV_XOR_V2_DMA_DESQ_BALR_OFF			0x000
22 #define MV_XOR_V2_DMA_DESQ_BAHR_OFF			0x004
23 #define MV_XOR_V2_DMA_DESQ_SIZE_OFF			0x008
24 #define MV_XOR_V2_DMA_DESQ_DONE_OFF			0x00C
25 #define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK		0x7FFF
26 #define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT		0
27 #define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK		0x1FFF
28 #define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT	16
29 #define MV_XOR_V2_DMA_DESQ_ARATTR_OFF			0x010
30 #define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK		0x3F3F
31 #define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE	0x202
32 #define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE		0x3C3C
33 #define MV_XOR_V2_DMA_IMSG_CDAT_OFF			0x014
34 #define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
35 #define   MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
36 #define   MV_XOR_V2_DMA_IMSG_TIMER_EN			BIT(18)
37 #define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
38   /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
39 #define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C
40 #define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK		0xFFFF
41 #define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT		16
42 #define MV_XOR_V2_DMA_IMSG_BALR_OFF			0x050
43 #define MV_XOR_V2_DMA_IMSG_BAHR_OFF			0x054
44 #define MV_XOR_V2_DMA_DESQ_CTRL_OFF			0x100
45 #define	  MV_XOR_V2_DMA_DESQ_CTRL_32B			1
46 #define   MV_XOR_V2_DMA_DESQ_CTRL_128B			7
47 #define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
48 #define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
49 #define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
50 #define MV_XOR_V2_DMA_IMSG_TMOT				0x810
51 #define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK		0x1FFF
52 
53 /* XOR Global registers */
54 #define MV_XOR_V2_GLOB_BW_CTRL				0x4
55 #define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT	0
56 #define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL	64
57 #define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT	8
58 #define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL	8
59 #define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT	12
60 #define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL	4
61 #define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT	16
62 #define	  MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL	4
63 #define MV_XOR_V2_GLOB_PAUSE				0x014
64 #define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL		0x8
65 #define MV_XOR_V2_GLOB_SYS_INT_CAUSE			0x200
66 #define MV_XOR_V2_GLOB_SYS_INT_MASK			0x204
67 #define MV_XOR_V2_GLOB_MEM_INT_CAUSE			0x220
68 #define MV_XOR_V2_GLOB_MEM_INT_MASK			0x224
69 
70 #define MV_XOR_V2_MIN_DESC_SIZE				32
71 #define MV_XOR_V2_EXT_DESC_SIZE				128
72 
73 #define MV_XOR_V2_DESC_RESERVED_SIZE			12
74 #define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE			12
75 
76 #define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF		8
77 
78 /*
79  * Descriptors queue size. With 32 bytes descriptors, up to 2^14
80  * descriptors are allowed, with 128 bytes descriptors, up to 2^12
81  * descriptors are allowed. This driver uses 128 bytes descriptors,
82  * but experimentation has shown that a set of 1024 descriptors is
83  * sufficient to reach a good level of performance.
84  */
85 #define MV_XOR_V2_DESC_NUM				1024
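/* With 128-byte descriptors this corresponds to a 128 KiB coherent DESQ allocation. */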
86 
87 /*
88  * Threshold values for descriptors and timeout, determined by
89  * experimentation as giving a good level of performance.
90  */
91 #define MV_XOR_V2_DONE_IMSG_THRD  0x14
92 #define MV_XOR_V2_TIMER_THRD      0xB0
93 
94 /**
95  * struct mv_xor_v2_descriptor - DMA HW descriptor
96  * @desc_id: used by S/W and is not affected by H/W.
97  * @flags: error and status flags
98  * @crc32_result: CRC32 calculation result
99  * @desc_ctrl: operation mode and control flags
100  * @buff_size: amount of bytes to be processed
101  * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
102  * AW-Attributes
103  * @data_buff_addr: Source (and might be RAID6 destination)
104  * addresses of data buffers in RAID5 and RAID6
105  * @reserved: reserved
106  */
107 struct mv_xor_v2_descriptor {
108 	u16 desc_id;
109 	u16 flags;
110 	u32 crc32_result;
111 	u32 desc_ctrl;
112 
113 	/* Definitions for desc_ctrl */
114 #define DESC_NUM_ACTIVE_D_BUF_SHIFT	22
115 #define DESC_OP_MODE_SHIFT		28
116 #define DESC_OP_MODE_NOP		0	/* Idle operation */
117 #define DESC_OP_MODE_MEMCPY		1	/* Pure-DMA operation */
118 #define DESC_OP_MODE_MEMSET		2	/* Mem-Fill operation */
119 #define DESC_OP_MODE_MEMINIT		3	/* Mem-Init operation */
120 #define DESC_OP_MODE_MEM_COMPARE	4	/* Mem-Compare operation */
121 #define DESC_OP_MODE_CRC32		5	/* CRC32 calculation */
122 #define DESC_OP_MODE_XOR		6	/* RAID5 (XOR) operation */
123 #define DESC_OP_MODE_RAID6		7	/* RAID6 P&Q-generation */
124 #define DESC_OP_MODE_RAID6_REC		8	/* RAID6 Recovery */
125 #define DESC_Q_BUFFER_ENABLE		BIT(16)
126 #define DESC_P_BUFFER_ENABLE		BIT(17)
127 #define DESC_IOD			BIT(27)
128 
129 	u32 buff_size;
130 	u32 fill_pattern_src_addr[4];
131 	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
132 	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
133 };
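
/*
 * Size check: 2 + 2 + 4 + 4 + 4 + 16 + 48 + 48 = 128 bytes, i.e.
 * MV_XOR_V2_EXT_DESC_SIZE; probe enforces this with a BUILD_BUG_ON().
 */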
134 
135 /**
136  * struct mv_xor_v2_device - implements a xor device
137  * @lock: lock for the engine
138  * @clk: reference to the 'core' clock
139  * @reg_clk: reference to the 'reg' clock
140  * @dma_base: memory mapped DMA register base
141  * @glob_base: memory mapped global register base
142  * @irq_tasklet: tasklet used for IRQ handling call-backs
143  * @free_sw_desc: linked list of free SW descriptors
144  * @dmadev: dma device
145  * @dmachan: dma channel
146  * @hw_desq: HW descriptors queue
147  * @hw_desq_virt: virtual address of DESCQ
148  * @sw_desq: SW descriptors queue
149  * @desc_size: HW descriptor size
150  * @npendings: number of pending descriptors (for which tx_submit has
151  * been called, but not yet issue_pending)
152  * @hw_queue_idx: HW queue index
153  * @msi_desc: local interrupt descriptor information
154  */
155 struct mv_xor_v2_device {
156 	spinlock_t lock;
157 	void __iomem *dma_base;
158 	void __iomem *glob_base;
159 	struct clk *clk;
160 	struct clk *reg_clk;
161 	struct tasklet_struct irq_tasklet;
162 	struct list_head free_sw_desc;
163 	struct dma_device dmadev;
164 	struct dma_chan	dmachan;
165 	dma_addr_t hw_desq;
166 	struct mv_xor_v2_descriptor *hw_desq_virt;
167 	struct mv_xor_v2_sw_desc *sw_desq;
168 	int desc_size;
169 	unsigned int npendings;
170 	unsigned int hw_queue_idx;
171 	struct msi_desc *msi_desc;
172 };
173 
174 /**
175  * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
176  * @idx: descriptor index
177  * @async_tx: support for the async_tx api
178  * @hw_desc: associated HW descriptor
179  * @free_list: node of the free SW descriptors list
180  */
181 struct mv_xor_v2_sw_desc {
182 	int idx;
183 	struct dma_async_tx_descriptor async_tx;
184 	struct mv_xor_v2_descriptor hw_desc;
185 	struct list_head free_list;
186 };
187 
188 /*
189  * Fill the data buffer addresses into a HW descriptor
190  */
191 static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
192 					struct mv_xor_v2_descriptor *desc,
193 					dma_addr_t src, int index)
194 {
195 	int arr_index = ((index >> 1) * 3);
196 
197 	/*
198 	 * Fill the buffers' addresses into the descriptor.
199 	 *
200 	 * The format of the buffers address for 2 sequential buffers
201 	 * X and X + 1:
202 	 *
203 	 *  First word:  Buffer-DX-Address-Low[31:0]
204 	 *  Second word: Buffer-DX+1-Address-Low[31:0]
205 	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
206 	 *		 DX-Buffer-Address-High[47:32] [15:0]
207 	 */
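	/*
	 * Worked example (derived from the code below): for the buffer
	 * pair index 2 / index 3, arr_index = 3, so the two low words land
	 * in data_buff_addr[3] and data_buff_addr[4], and bits [47:32] of
	 * both addresses are packed into data_buff_addr[5]. Only 16 high
	 * bits are kept, which is enough for the 40-bit DMA mask set in
	 * probe.
	 */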
208 	if ((index & 0x1) == 0) {
209 		desc->data_buff_addr[arr_index] = lower_32_bits(src);
210 
211 		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
212 		desc->data_buff_addr[arr_index + 2] |=
213 			upper_32_bits(src) & 0xFFFF;
214 	} else {
215 		desc->data_buff_addr[arr_index + 1] =
216 			lower_32_bits(src);
217 
218 		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
219 		desc->data_buff_addr[arr_index + 2] |=
220 			(upper_32_bits(src) & 0xFFFF) << 16;
221 	}
222 }
223 
224 /*
225  * notify the engine of new descriptors, and update the available index.
226  */
227 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
228 				       int num_of_desc)
229 {
230 	/* write the number of new descriptors in the DESQ. */
231 	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
232 }
233 
234 /*
235  * free HW descriptors
236  */
237 static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
238 					  int num_of_desc)
239 {
240 	/* write the number of descriptors to deallocate from the DESQ. */
241 	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
242 }
243 
244 /*
245  * Set descriptor size
246  * Return the HW descriptor size in bytes
247  */
248 static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
249 {
250 	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
251 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
252 
253 	return MV_XOR_V2_EXT_DESC_SIZE;
254 }
255 
256 /*
257  * Set the IMSG threshold
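 * The descriptor-count threshold and timeout appear to act as interrupt
 * coalescing parameters: a completion interrupt is raised once enough
 * descriptors are done, or once the timer expires, whichever comes first.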
258  */
259 static inline
260 void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
261 {
262 	u32 reg;
263 
264 	/* Configure threshold of number of descriptors, and enable timer */
265 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
266 	reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
267 	reg |= MV_XOR_V2_DONE_IMSG_THRD;
268 	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
269 	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
270 
271 	/* Configure Timer Threshold */
272 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
273 	reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
274 	reg |= MV_XOR_V2_TIMER_THRD;
275 	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
276 }
277 
278 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
279 {
280 	struct mv_xor_v2_device *xor_dev = data;
281 	unsigned int ndescs;
282 	u32 reg;
283 
284 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
285 
286 	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
287 		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
288 
289 	/* No descriptors to process */
290 	if (!ndescs)
291 		return IRQ_NONE;
292 
293 	/* schedule a tasklet to handle descriptors callbacks */
294 	tasklet_schedule(&xor_dev->irq_tasklet);
295 
296 	return IRQ_HANDLED;
297 }
298 
299 /*
300  * submit a descriptor to the DMA engine
301  */
302 static dma_cookie_t
303 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
304 {
305 	void *dest_hw_desc;
306 	dma_cookie_t cookie;
307 	struct mv_xor_v2_sw_desc *sw_desc =
308 		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
309 	struct mv_xor_v2_device *xor_dev =
310 		container_of(tx->chan, struct mv_xor_v2_device, dmachan);
311 
312 	dev_dbg(xor_dev->dmadev.dev,
313 		"%s sw_desc %p: async_tx %p\n",
314 		__func__, sw_desc, &sw_desc->async_tx);
315 
316 	/* assign cookie */
317 	spin_lock_bh(&xor_dev->lock);
318 	cookie = dma_cookie_assign(tx);
319 
320 	/* copy the HW descriptor from the SW descriptor to the DESQ */
321 	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
322 
323 	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
324 
325 	xor_dev->npendings++;
326 	xor_dev->hw_queue_idx++;
327 	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
328 		xor_dev->hw_queue_idx = 0;
329 
330 	spin_unlock_bh(&xor_dev->lock);
331 
332 	return cookie;
333 }
334 
335 /*
336  * Prepare a SW descriptor
337  */
338 static struct mv_xor_v2_sw_desc	*
339 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
340 {
341 	struct mv_xor_v2_sw_desc *sw_desc;
342 	bool found = false;
343 
344 	/* Lock the channel */
345 	spin_lock_bh(&xor_dev->lock);
346 
347 	if (list_empty(&xor_dev->free_sw_desc)) {
348 		spin_unlock_bh(&xor_dev->lock);
349 		/* schedule tasklet to free some descriptors */
350 		tasklet_schedule(&xor_dev->irq_tasklet);
351 		return NULL;
352 	}
353 
354 	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
355 		if (async_tx_test_ack(&sw_desc->async_tx)) {
356 			found = true;
357 			break;
358 		}
359 	}
360 
361 	if (!found) {
362 		spin_unlock_bh(&xor_dev->lock);
363 		return NULL;
364 	}
365 
366 	list_del(&sw_desc->free_list);
367 
368 	/* Release the channel */
369 	spin_unlock_bh(&xor_dev->lock);
370 
371 	return sw_desc;
372 }
373 
374 /*
375  * Prepare a HW descriptor for a memcpy operation
376  */
377 static struct dma_async_tx_descriptor *
378 mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
379 			  dma_addr_t src, size_t len, unsigned long flags)
380 {
381 	struct mv_xor_v2_sw_desc *sw_desc;
382 	struct mv_xor_v2_descriptor *hw_descriptor;
383 	struct mv_xor_v2_device	*xor_dev;
384 
385 	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
386 
387 	dev_dbg(xor_dev->dmadev.dev,
388 		"%s len: %zu src %pad dest %pad flags: %ld\n",
389 		__func__, len, &src, &dest, flags);
390 
391 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
392 	if (!sw_desc)
393 		return NULL;
394 
395 	sw_desc->async_tx.flags = flags;
396 
397 	/* set the HW descriptor */
398 	hw_descriptor = &sw_desc->hw_desc;
399 
400 	/* save the SW descriptor ID to restore when operation is done */
401 	hw_descriptor->desc_id = sw_desc->idx;
402 
403 	/* Set the MEMCPY control word */
404 	hw_descriptor->desc_ctrl =
405 		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
406 
407 	if (flags & DMA_PREP_INTERRUPT)
408 		hw_descriptor->desc_ctrl |= DESC_IOD;
409 
410 	/* Set source address */
411 	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
412 	hw_descriptor->fill_pattern_src_addr[1] =
413 		upper_32_bits(src) & 0xFFFF;
414 
415 	/* Set Destination address */
416 	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
417 	hw_descriptor->fill_pattern_src_addr[3] =
418 		upper_32_bits(dest) & 0xFFFF;
419 
420 	/* Set buffers size */
421 	hw_descriptor->buff_size = len;
422 
423 	/* return the async tx descriptor */
424 	return &sw_desc->async_tx;
425 }
426 
427 /*
428  * Prepare a HW descriptor for a XOR operation
429  */
430 static struct dma_async_tx_descriptor *
431 mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
432 		       unsigned int src_cnt, size_t len, unsigned long flags)
433 {
434 	struct mv_xor_v2_sw_desc *sw_desc;
435 	struct mv_xor_v2_descriptor *hw_descriptor;
436 	struct mv_xor_v2_device	*xor_dev =
437 		container_of(chan, struct mv_xor_v2_device, dmachan);
438 	int i;
439 
440 	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
441 		return NULL;
442 
443 	dev_dbg(xor_dev->dmadev.dev,
444 		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
445 		__func__, src_cnt, len, &dest, flags);
446 
447 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
448 	if (!sw_desc)
449 		return NULL;
450 
451 	sw_desc->async_tx.flags = flags;
452 
453 	/* set the HW descriptor */
454 	hw_descriptor = &sw_desc->hw_desc;
455 
456 	/* save the SW descriptor ID to restore when operation is done */
457 	hw_descriptor->desc_id = sw_desc->idx;
458 
459 	/* Set the XOR control word */
460 	hw_descriptor->desc_ctrl =
461 		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
462 	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
463 
464 	if (flags & DMA_PREP_INTERRUPT)
465 		hw_descriptor->desc_ctrl |= DESC_IOD;
466 
467 	/* Set the data buffers */
468 	for (i = 0; i < src_cnt; i++)
469 		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
470 
471 	hw_descriptor->desc_ctrl |=
472 		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
473 
474 	/* Set Destination address */
475 	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
476 	hw_descriptor->fill_pattern_src_addr[3] =
477 		upper_32_bits(dest) & 0xFFFF;
478 
479 	/* Set buffers size */
480 	hw_descriptor->buff_size = len;
481 
482 	/* return the async tx descriptor */
483 	return &sw_desc->async_tx;
484 }
485 
486 /*
487  * Prepare a HW descriptor for interrupt operation.
488  */
489 static struct dma_async_tx_descriptor *
490 mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
491 {
492 	struct mv_xor_v2_sw_desc *sw_desc;
493 	struct mv_xor_v2_descriptor *hw_descriptor;
494 	struct mv_xor_v2_device	*xor_dev =
495 		container_of(chan, struct mv_xor_v2_device, dmachan);
496 
497 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
498 	if (!sw_desc)
499 		return NULL;
500 
501 	/* set the HW descriptor */
502 	hw_descriptor = &sw_desc->hw_desc;
503 
504 	/* save the SW descriptor ID to restore when operation is done */
505 	hw_descriptor->desc_id = sw_desc->idx;
506 
507 	/* Set the INTERRUPT control word */
508 	hw_descriptor->desc_ctrl =
509 		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
510 	hw_descriptor->desc_ctrl |= DESC_IOD;
511 
512 	/* return the async tx descriptor */
513 	return &sw_desc->async_tx;
514 }
515 
516 /*
517  * push pending transactions to hardware
518  */
519 static void mv_xor_v2_issue_pending(struct dma_chan *chan)
520 {
521 	struct mv_xor_v2_device *xor_dev =
522 		container_of(chan, struct mv_xor_v2_device, dmachan);
523 
524 	spin_lock_bh(&xor_dev->lock);
525 
526 	/*
527 	 * update the engine with the number of descriptors to
528 	 * process
529 	 */
530 	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
531 	xor_dev->npendings = 0;
532 
533 	spin_unlock_bh(&xor_dev->lock);
534 }
535 
536 static inline
537 int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
538 				 int *pending_ptr)
539 {
540 	u32 reg;
541 
542 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
543 
544 	/* get the next pending descriptor index */
545 	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
546 			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
547 
548 	/* get the number of descriptors pending handling */
549 	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
550 		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
551 }
552 
553 /*
554  * handle the descriptors after HW process
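 *
 * The read pointer returned by mv_xor_v2_get_pending_params() indexes the
 * same DESQ ring that tx_submit fills; the desc_id field stored in each HW
 * descriptor maps a completed entry back to its SW descriptor.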
555  */
556 static void mv_xor_v2_tasklet(unsigned long data)
557 {
558 	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
559 	int pending_ptr, num_of_pending, i;
560 	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
561 
562 	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
563 
564 	/* get the pending descriptors parameters */
565 	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
566 
567 	/* loop over the completed descriptors */
568 	for (i = 0; i < num_of_pending; i++) {
569 		struct mv_xor_v2_descriptor *next_pending_hw_desc =
570 			xor_dev->hw_desq_virt + pending_ptr;
571 
572 		/* get the SW descriptor related to the HW descriptor */
573 		next_pending_sw_desc =
574 			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];
575 
576 		/* call the callback */
577 		if (next_pending_sw_desc->async_tx.cookie > 0) {
578 			/*
579 			 * update the channel's completed cookie - no
580 			 * lock is required, the IMSG threshold provides
581 			 * the locking
582 			 */
583 			dma_cookie_complete(&next_pending_sw_desc->async_tx);
584 
585 			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
586 			dmaengine_desc_get_callback_invoke(
587 					&next_pending_sw_desc->async_tx, NULL);
588 		}
589 
590 		dma_run_dependencies(&next_pending_sw_desc->async_tx);
591 
592 		/* Lock the channel */
593 		spin_lock_bh(&xor_dev->lock);
594 
595 		/* add the SW descriptor to the free descriptors list */
596 		list_add(&next_pending_sw_desc->free_list,
597 			 &xor_dev->free_sw_desc);
598 
599 		/* Release the channel */
600 		spin_unlock_bh(&xor_dev->lock);
601 
602 		/* increment the next descriptor */
603 		pending_ptr++;
604 		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
605 			pending_ptr = 0;
606 	}
607 
608 	if (num_of_pending != 0) {
609 		/* free the descriptors */
610 		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
611 	}
612 }
613 
614 /*
615  *	Set DMA Interrupt-message (IMSG) parameters
616  */
617 static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
618 {
619 	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
620 
621 	writel(msg->address_lo,
622 	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
623 	writel(msg->address_hi & 0xFFFF,
624 	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
625 	writel(msg->data,
626 	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
627 }
628 
629 static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
630 {
631 	u32 reg;
632 
633 	/* write the DESQ size to the DMA engine */
634 	writel(MV_XOR_V2_DESC_NUM,
635 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
636 
637 	/* write the DESQ address to the DMA engine */
638 	writel(lower_32_bits(xor_dev->hw_desq),
639 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
640 	writel(upper_32_bits(xor_dev->hw_desq),
641 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
642 
643 	/*
644 	 * This is a temporary solution, until we activate the
645 	 * SMMU. Set the attributes for reading & writing data buffers
646 	 * & descriptors to:
647 	 *
648 	 *  - OuterShareable - Snoops will be performed on CPU caches
649 	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
650 	 *    and Allocate
651 	 */
652 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
653 	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
654 	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
655 		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
656 	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
657 
658 	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
659 	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
660 	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
661 		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
662 	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
663 
664 	/* BW CTRL - set values to optimize the XOR performance:
665 	 *
666 	 *  - Set WrBurstLen & RdBurstLen - the unit will issue
667 	 *    maximum of 256B write/read transactions.
668 	 *  - Limit the number of outstanding write & read data
669 	 *    (OBB/IBB) requests to the maximal value.
670 	 */
671 	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
672 		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
673 	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL  <<
674 		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
675 	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
676 		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
677 	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
678 		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
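	/* With the values above, reg evaluates to 0x44840. */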
679 	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
680 
681 	/* Disable the AXI timer feature */
682 	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
683 	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
684 	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
685 
686 	/* enable the DMA engine */
687 	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
688 
689 	return 0;
690 }
691 
692 static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
693 {
694 	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
695 
696 	/* Write 1 to the DESQ stop register to disable the XOR unit. */
697 	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
698 
699 	return 0;
700 }
701 
702 static int mv_xor_v2_resume(struct platform_device *dev)
703 {
704 	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
705 
706 	mv_xor_v2_set_desc_size(xor_dev);
707 	mv_xor_v2_enable_imsg_thrd(xor_dev);
708 	mv_xor_v2_descq_init(xor_dev);
709 
710 	return 0;
711 }
712 
713 static int mv_xor_v2_probe(struct platform_device *pdev)
714 {
715 	struct mv_xor_v2_device *xor_dev;
716 	struct resource *res;
717 	int i, ret = 0;
718 	struct dma_device *dma_dev;
719 	struct mv_xor_v2_sw_desc *sw_desc;
720 	struct msi_desc *msi_desc;
721 
722 	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
723 		     MV_XOR_V2_EXT_DESC_SIZE);
724 
725 	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
726 	if (!xor_dev)
727 		return -ENOMEM;
728 
729 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
730 	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
731 	if (IS_ERR(xor_dev->dma_base))
732 		return PTR_ERR(xor_dev->dma_base);
733 
734 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
735 	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
736 	if (IS_ERR(xor_dev->glob_base))
737 		return PTR_ERR(xor_dev->glob_base);
738 
739 	platform_set_drvdata(pdev, xor_dev);
740 
741 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
742 	if (ret)
743 		return ret;
744 
745 	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
746 	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
747 		if (!IS_ERR(xor_dev->reg_clk)) {
748 			ret = clk_prepare_enable(xor_dev->reg_clk);
749 			if (ret)
750 				return ret;
751 		} else {
752 			return PTR_ERR(xor_dev->reg_clk);
753 		}
754 	}
755 
756 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
757 	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
758 		ret = -EPROBE_DEFER;
759 		goto disable_reg_clk;
760 	}
761 	if (!IS_ERR(xor_dev->clk)) {
762 		ret = clk_prepare_enable(xor_dev->clk);
763 		if (ret)
764 			goto disable_reg_clk;
765 	}
766 
767 	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
768 					     mv_xor_v2_set_msi_msg);
769 	if (ret)
770 		goto disable_clk;
771 
772 	msi_desc = first_msi_entry(&pdev->dev);
773 	if (!msi_desc) {
774 		ret = -ENODEV;
		goto free_msi_irqs;
	}
775 	xor_dev->msi_desc = msi_desc;
776 
777 	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
778 			       mv_xor_v2_interrupt_handler, 0,
779 			       dev_name(&pdev->dev), xor_dev);
780 	if (ret)
781 		goto free_msi_irqs;
782 
783 	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
784 		     (unsigned long) xor_dev);
785 
786 	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
787 
788 	dma_cookie_init(&xor_dev->dmachan);
789 
790 	/*
791 	 * allocate coherent memory for hardware descriptors
792 	 * note: writecombine gives slightly better performance, but
793 	 * requires that we explicitly flush the writes
794 	 */
795 	xor_dev->hw_desq_virt =
796 		dma_alloc_coherent(&pdev->dev,
797 				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
798 				   &xor_dev->hw_desq, GFP_KERNEL);
799 	if (!xor_dev->hw_desq_virt) {
800 		ret = -ENOMEM;
801 		goto free_msi_irqs;
802 	}
803 
804 	/* alloc memory for the SW descriptors */
805 	xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
806 					MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
807 					GFP_KERNEL);
808 	if (!xor_dev->sw_desq) {
809 		ret = -ENOMEM;
810 		goto free_hw_desq;
811 	}
812 
813 	spin_lock_init(&xor_dev->lock);
814 
815 	/* init the free SW descriptors list */
816 	INIT_LIST_HEAD(&xor_dev->free_sw_desc);
817 
818 	/* add all SW descriptors to the free list */
819 	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
820 		struct mv_xor_v2_sw_desc *sw_desc =
821 			xor_dev->sw_desq + i;
822 		sw_desc->idx = i;
823 		dma_async_tx_descriptor_init(&sw_desc->async_tx,
824 					     &xor_dev->dmachan);
825 		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
826 		async_tx_ack(&sw_desc->async_tx);
827 
828 		list_add(&sw_desc->free_list,
829 			 &xor_dev->free_sw_desc);
830 	}
831 
832 	dma_dev = &xor_dev->dmadev;
833 
834 	/* set DMA capabilities */
835 	dma_cap_zero(dma_dev->cap_mask);
836 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
837 	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
838 	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
839 
840 	/* init dma link list */
841 	INIT_LIST_HEAD(&dma_dev->channels);
842 
843 	/* set base routines */
844 	dma_dev->device_tx_status = dma_cookie_status;
845 	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
846 	dma_dev->dev = &pdev->dev;
847 
848 	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
849 	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
850 	dma_dev->max_xor = 8;
851 	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
852 
853 	xor_dev->dmachan.device = dma_dev;
854 
855 	list_add_tail(&xor_dev->dmachan.device_node,
856 		      &dma_dev->channels);
857 
858 	mv_xor_v2_enable_imsg_thrd(xor_dev);
859 
860 	mv_xor_v2_descq_init(xor_dev);
861 
862 	ret = dma_async_device_register(dma_dev);
863 	if (ret)
864 		goto free_hw_desq;
865 
866 	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
867 
868 	return 0;
869 
870 free_hw_desq:
871 	dma_free_coherent(&pdev->dev,
872 			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
873 			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
874 free_msi_irqs:
875 	platform_msi_domain_free_irqs(&pdev->dev);
876 disable_clk:
877 	clk_disable_unprepare(xor_dev->clk);
878 disable_reg_clk:
879 	clk_disable_unprepare(xor_dev->reg_clk);
880 	return ret;
881 }
882 
883 static int mv_xor_v2_remove(struct platform_device *pdev)
884 {
885 	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
886 
887 	dma_async_device_unregister(&xor_dev->dmadev);
888 
889 	dma_free_coherent(&pdev->dev,
890 			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
891 			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
892 
893 	devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
894 
895 	platform_msi_domain_free_irqs(&pdev->dev);
896 
897 	tasklet_kill(&xor_dev->irq_tasklet);
898 
899 	clk_disable_unprepare(xor_dev->clk);
900 
901 	return 0;
902 }
903 
904 #ifdef CONFIG_OF
905 static const struct of_device_id mv_xor_v2_dt_ids[] = {
906 	{ .compatible = "marvell,xor-v2", },
907 	{},
908 };
909 MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
910 #endif
911 
912 static struct platform_driver mv_xor_v2_driver = {
913 	.probe		= mv_xor_v2_probe,
914 	.suspend	= mv_xor_v2_suspend,
915 	.resume		= mv_xor_v2_resume,
916 	.remove		= mv_xor_v2_remove,
917 	.driver		= {
918 		.name	= "mv_xor_v2",
919 		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
920 	},
921 };
922 
923 module_platform_driver(mv_xor_v2_driver);
924 
925 MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
926 MODULE_LICENSE("GPL");
927