/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE		PAGE_SIZE
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1
#define MV_XOR_MAX_CHANNELS		2

#define MV_XOR_MIN_BYTE_COUNT		SZ_128
#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)
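
/*
 * For reference: each MV_XOR_POOL_SIZE descriptor pool holds
 * MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE hardware descriptor slots,
 * e.g. 64 slots on a configuration with 4 KiB pages (the page size
 * itself is architecture/configuration dependent).
 */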

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_DESCRIPTOR_SWAP		BIT(14)

#define XOR_DESC_DMA_OWNED		BIT(31)
#define XOR_DESC_EOD_INT_EN		BIT(31)

#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
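
/*
 * Most per-channel registers are replicated at 4-byte strides, so
 * channel "idx" sees its own copy at base + offset + idx * 4.  A
 * minimal, illustrative sketch (assuming a valid struct mv_xor_chan
 * pointer "chan"; not a definitive programming sequence):
 *
 *	u32 cfg = readl_relaxed(XOR_CONFIG(chan));
 *
 *	cfg |= XOR_DESCRIPTOR_SWAP;
 *	writel_relaxed(cfg, XOR_CONFIG(chan));
 */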

#define XOR_INT_END_OF_DESC	BIT(0)
#define XOR_INT_END_OF_CHAIN	BIT(1)
#define XOR_INT_STOPPED		BIT(2)
#define XOR_INT_PAUSED		BIT(3)
#define XOR_INT_ERR_DECODE	BIT(4)
#define XOR_INT_ERR_RDPROT	BIT(5)
#define XOR_INT_ERR_WRPROT	BIT(6)
#define XOR_INT_ERR_OWN		BIT(7)
#define XOR_INT_ERR_PAR		BIT(8)
#define XOR_INT_ERR_MBUS	BIT(9)

#define XOR_INTR_ERRORS		(XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
				 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
				 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE	(XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
				 XOR_INT_STOPPED     | XOR_INTR_ERRORS)
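
/*
 * Sketch only, not a definitive sequence: the shared cause/mask
 * registers pack a 16-bit slice per channel, so unmasking the events
 * above for one channel would look roughly like (assuming a valid
 * struct mv_xor_chan pointer "chan"):
 *
 *	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 *
 *	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 *	writel_relaxed(val, XOR_INTR_MASK(chan));
 */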

#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

struct mv_xor_device {
	void __iomem	     *xor_base;
	void __iomem	     *xor_high_base;
	struct clk	     *clk;
	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped register base of the high register block
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW but still need to be acked
 * @dmadev: dmaengine device registered for this channel
 * @dmachan: common dmaengine channel object members
 * @last_used: place holder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 */
struct mv_xor_chan {
	int			pending;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	void __iomem		*mmr_high_base;
	unsigned int		idx;
	int			irq;
	enum dma_transaction_type	current_type;
	struct list_head	chain;
	struct list_head	completed_slots;
	dma_addr_t		dma_desc_pool;
	void			*dma_desc_pool_virt;
	size_t			pool_size;
	struct dma_device	dmadev;
	struct dma_chan		dmachan;
	struct mv_xor_desc_slot	*last_used;
	struct list_head	all_slots;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t		dummy_src_addr, dummy_dst_addr;
};

/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @type: dmaengine transaction type carried by this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @slot_used: slot in use or not
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head	slot_node;
	struct list_head	chain_node;
	struct list_head	completed_node;
	enum dma_transaction_type	type;
	void			*hw_desc;
	u16			slot_used;
	u16			idx;
	struct dma_async_tx_descriptor	async_tx;
};

/*
 * This structure describes the 64-byte XOR hardware descriptor. The
 * mv_phy_src_idx() macro must be used when indexing into the
 * phy_src_addr[] array. This is because the 'descriptor swap'
 * feature, used on big-endian systems, swaps descriptor data within
 * 8-byte blocks. Two consecutive entries of phy_src_addr[] are
 * therefore swapped on big-endian, which is why mv_phy_src_idx() has
 * a different implementation there.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif
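
/*
 * Illustrative use of mv_phy_src_idx() (a sketch, assuming a mapped
 * hardware descriptor pointer "hw_desc" and a source index
 * "src_idx"): always go through the macro when touching
 * phy_src_addr[], so the same code is correct with and without the
 * descriptor-swap layout:
 *
 *	hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)] = dma_src_addr;
 */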

#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
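
/*
 * Rough usage sketch for the helpers above (illustrative only;
 * "pool_virt" and "slot" are hypothetical names for a pool's first
 * hardware descriptor and a struct mv_xor_desc_slot pointer):
 * mv_hw_desc_slot_idx() walks from the start of a descriptor pool to
 * the hardware descriptor backing slot "idx", and to_mv_sw_desc()
 * goes from a pointer to the hw_desc member back to its containing
 * slot:
 *
 *	struct mv_xor_desc *hw = mv_hw_desc_slot_idx(pool_virt, slot->idx);
 *	struct mv_xor_desc_slot *sw = to_mv_sw_desc(&slot->hw_desc);
 */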

#endif