xref: /linux/drivers/dma/hisi_dma.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2019-2022 HiSilicon Limited. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/dmaengine.h>
6 #include <linux/init.h>
7 #include <linux/iopoll.h>
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 #include <linux/spinlock.h>
11 #include "virt-dma.h"
12 
13 /* HiSilicon DMA register common field define */
14 #define HISI_DMA_Q_SQ_BASE_L			0x0
15 #define HISI_DMA_Q_SQ_BASE_H			0x4
16 #define HISI_DMA_Q_SQ_DEPTH			0x8
17 #define HISI_DMA_Q_SQ_TAIL_PTR			0xc
18 #define HISI_DMA_Q_CQ_BASE_L			0x10
19 #define HISI_DMA_Q_CQ_BASE_H			0x14
20 #define HISI_DMA_Q_CQ_DEPTH			0x18
21 #define HISI_DMA_Q_CQ_HEAD_PTR			0x1c
22 #define HISI_DMA_Q_CTRL0			0x20
23 #define HISI_DMA_Q_CTRL0_QUEUE_EN		BIT(0)
24 #define HISI_DMA_Q_CTRL0_QUEUE_PAUSE		BIT(4)
25 #define HISI_DMA_Q_CTRL1			0x24
26 #define HISI_DMA_Q_CTRL1_QUEUE_RESET		BIT(0)
27 #define HISI_DMA_Q_FSM_STS			0x30
28 #define HISI_DMA_Q_FSM_STS_MASK			GENMASK(3, 0)
29 #define HISI_DMA_Q_ERR_INT_NUM0			0x84
30 #define HISI_DMA_Q_ERR_INT_NUM1			0x88
31 #define HISI_DMA_Q_ERR_INT_NUM2			0x8c
32 
33 /* HiSilicon IP08 DMA register and field define */
34 #define HISI_DMA_HIP08_MODE			0x217C
35 #define HISI_DMA_HIP08_Q_BASE			0x0
36 #define HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN	BIT(2)
37 #define HISI_DMA_HIP08_Q_INT_STS		0x40
38 #define HISI_DMA_HIP08_Q_INT_MSK		0x44
39 #define HISI_DMA_HIP08_Q_INT_STS_MASK		GENMASK(14, 0)
40 #define HISI_DMA_HIP08_Q_ERR_INT_NUM3		0x90
41 #define HISI_DMA_HIP08_Q_ERR_INT_NUM4		0x94
42 #define HISI_DMA_HIP08_Q_ERR_INT_NUM5		0x98
43 #define HISI_DMA_HIP08_Q_ERR_INT_NUM6		0x48
44 #define HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT	BIT(24)
45 
46 /* HiSilicon IP09 DMA register and field define */
47 #define HISI_DMA_HIP09_DMA_FLR_DISABLE		0xA00
48 #define HISI_DMA_HIP09_DMA_FLR_DISABLE_B	BIT(0)
49 #define HISI_DMA_HIP09_Q_BASE			0x2000
50 #define HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN	GENMASK(31, 28)
51 #define HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT		BIT(26)
52 #define HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT		BIT(27)
53 #define HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE	BIT(2)
54 #define HISI_DMA_HIP09_Q_INT_STS		0x40
55 #define HISI_DMA_HIP09_Q_INT_MSK		0x44
56 #define HISI_DMA_HIP09_Q_INT_STS_MASK		0x1
57 #define HISI_DMA_HIP09_Q_ERR_INT_STS		0x48
58 #define HISI_DMA_HIP09_Q_ERR_INT_MSK		0x4C
59 #define HISI_DMA_HIP09_Q_ERR_INT_STS_MASK	GENMASK(18, 1)
60 #define HISI_DMA_HIP09_PORT_CFG_REG(port_id)	(0x800 + \
61 						(port_id) * 0x20)
62 #define HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B	BIT(16)
63 
64 #define HISI_DMA_HIP09_MAX_PORT_NUM		16
65 
66 #define HISI_DMA_HIP08_MSI_NUM			32
67 #define HISI_DMA_HIP08_CHAN_NUM			30
68 #define HISI_DMA_HIP09_MSI_NUM			4
69 #define HISI_DMA_HIP09_CHAN_NUM			4
70 #define HISI_DMA_REVISION_HIP08B		0x21
71 #define HISI_DMA_REVISION_HIP09A		0x30
72 
73 #define HISI_DMA_Q_OFFSET			0x100
74 #define HISI_DMA_Q_DEPTH_VAL			1024
75 
76 #define PCI_BAR_2				2
77 
78 #define HISI_DMA_POLL_Q_STS_DELAY_US		10
79 #define HISI_DMA_POLL_Q_STS_TIME_OUT_US		1000
80 
81 #define HISI_DMA_MAX_DIR_NAME_LEN		128
82 
83 /*
84  * The HIP08B(HiSilicon IP08) and HIP09A(HiSilicon IP09) are DMA iEPs, they
85  * have the same pci device id but different pci revision.
86  * Unfortunately, they have different register layouts, so two layout
87  * enumerations are defined.
88  */
/* Register layout variant; chosen at probe time from the PCI revision ID. */
enum hisi_dma_reg_layout {
	HISI_DMA_REG_LAYOUT_INVALID = 0,
	HISI_DMA_REG_LAYOUT_HIP08,
	HISI_DMA_REG_LAYOUT_HIP09
};
94 
/* Controller working mode: PCIe Endpoint or Root Complex side. */
enum hisi_dma_mode {
	EP = 0,
	RC,
};
99 
/*
 * Hardware queue FSM states as read from the HISI_DMA_Q_FSM_STS field.
 * DISABLE is a software-only marker set when a channel is torn down.
 */
enum hisi_dma_chan_status {
	DISABLE = -1,
	IDLE = 0,
	RUN,
	CPL,
	PAUSE,
	HALT,
	ABORT,
	WAIT,
	BUFFCLR,
};
111 
/* Submission queue entry: one hardware descriptor per memcpy task. */
struct hisi_dma_sqe {
	__le32 dw0;	/* opcode and control flags, see masks below */
#define OPCODE_MASK			GENMASK(3, 0)
#define OPCODE_SMALL_PACKAGE		0x1
#define OPCODE_M2M			0x4
#define LOCAL_IRQ_EN			BIT(8)
#define ATTR_SRC_MASK			GENMASK(14, 12)
	__le32 dw1;
	__le32 dw2;
#define ATTR_DST_MASK			GENMASK(26, 24)
	__le32 length;		/* transfer length in bytes */
	__le64 src_addr;	/* DMA source address */
	__le64 dst_addr;	/* DMA destination address */
};
126 
/* Completion queue entry written back by hardware for each finished SQE. */
struct hisi_dma_cqe {
	__le32 rsv0;
	__le32 rsv1;
	__le16 sq_head;		/* SQ head position at completion time */
	__le16 rsv2;
	__le16 rsv3;
	__le16 w0;		/* status field and valid bit, see masks below */
#define STATUS_MASK			GENMASK(15, 1)
#define STATUS_SUCC			0x0
#define VALID_BIT			BIT(0)
};
138 
/* Software descriptor: virt-dma bookkeeping plus the prebuilt hardware SQE. */
struct hisi_dma_desc {
	struct virt_dma_desc vd;
	struct hisi_dma_sqe sqe;
};
143 
/* Per-channel (per hardware queue pair) state. */
struct hisi_dma_chan {
	struct virt_dma_chan vc;
	struct hisi_dma_dev *hdma_dev;	/* parent device */
	struct hisi_dma_sqe *sq;	/* submission ring (coherent DMA memory) */
	struct hisi_dma_cqe *cq;	/* completion ring (coherent DMA memory) */
	dma_addr_t sq_dma;		/* DMA address of the SQ ring */
	dma_addr_t cq_dma;		/* DMA address of the CQ ring */
	u32 sq_tail;			/* next free SQ slot (software tail) */
	u32 cq_head;			/* next CQ slot to consume */
	u32 qp_num;			/* hardware queue pair index */
	enum hisi_dma_chan_status status;
	struct hisi_dma_desc *desc;	/* currently in-flight descriptor, or NULL */
};
157 
/* Per-PCI-function device state; chan[] is sized at probe from chan_num. */
struct hisi_dma_dev {
	struct pci_dev *pdev;
	void __iomem *base;		/* BAR2 mapping */
	struct dma_device dma_dev;
	u32 chan_num;
	u32 chan_depth;			/* SQ/CQ ring depth in entries */
	enum hisi_dma_reg_layout reg_layout;
	void __iomem *queue_base; /* queue region start of register */
	struct hisi_dma_chan chan[];
};
168 
169 #ifdef CONFIG_DEBUG_FS
170 
/* Per-queue registers common to HIP08 and HIP09 (offsets within a queue window). */
static const struct debugfs_reg32 hisi_dma_comm_chan_regs[] = {
	{"DMA_QUEUE_SQ_DEPTH                ", 0x0008ull},
	{"DMA_QUEUE_SQ_TAIL_PTR             ", 0x000Cull},
	{"DMA_QUEUE_CQ_DEPTH                ", 0x0018ull},
	{"DMA_QUEUE_CQ_HEAD_PTR             ", 0x001Cull},
	{"DMA_QUEUE_CTRL0                   ", 0x0020ull},
	{"DMA_QUEUE_CTRL1                   ", 0x0024ull},
	{"DMA_QUEUE_FSM_STS                 ", 0x0030ull},
	{"DMA_QUEUE_SQ_STS                  ", 0x0034ull},
	{"DMA_QUEUE_CQ_TAIL_PTR             ", 0x003Cull},
	{"DMA_QUEUE_INT_STS                 ", 0x0040ull},
	{"DMA_QUEUE_INT_MSK                 ", 0x0044ull},
	{"DMA_QUEUE_INT_RO                  ", 0x006Cull},
};
185 
/* Per-queue registers that exist only on the HIP08 layout. */
static const struct debugfs_reg32 hisi_dma_hip08_chan_regs[] = {
	{"DMA_QUEUE_BYTE_CNT                ", 0x0038ull},
	{"DMA_ERR_INT_NUM6                  ", 0x0048ull},
	{"DMA_QUEUE_DESP0                   ", 0x0050ull},
	{"DMA_QUEUE_DESP1                   ", 0x0054ull},
	{"DMA_QUEUE_DESP2                   ", 0x0058ull},
	{"DMA_QUEUE_DESP3                   ", 0x005Cull},
	{"DMA_QUEUE_DESP4                   ", 0x0074ull},
	{"DMA_QUEUE_DESP5                   ", 0x0078ull},
	{"DMA_QUEUE_DESP6                   ", 0x007Cull},
	{"DMA_QUEUE_DESP7                   ", 0x0080ull},
	{"DMA_ERR_INT_NUM0                  ", 0x0084ull},
	{"DMA_ERR_INT_NUM1                  ", 0x0088ull},
	{"DMA_ERR_INT_NUM2                  ", 0x008Cull},
	{"DMA_ERR_INT_NUM3                  ", 0x0090ull},
	{"DMA_ERR_INT_NUM4                  ", 0x0094ull},
	{"DMA_ERR_INT_NUM5                  ", 0x0098ull},
	{"DMA_QUEUE_SQ_STS2                 ", 0x00A4ull},
};
205 
/* Per-queue registers that exist only on the HIP09 layout. */
static const struct debugfs_reg32 hisi_dma_hip09_chan_regs[] = {
	{"DMA_QUEUE_ERR_INT_STS             ", 0x0048ull},
	{"DMA_QUEUE_ERR_INT_MSK             ", 0x004Cull},
	{"DFX_SQ_READ_ERR_PTR               ", 0x0068ull},
	{"DFX_DMA_ERR_INT_NUM0              ", 0x0084ull},
	{"DFX_DMA_ERR_INT_NUM1              ", 0x0088ull},
	{"DFX_DMA_ERR_INT_NUM2              ", 0x008Cull},
	{"DFX_DMA_QUEUE_SQ_STS2             ", 0x00A4ull},
};
215 
/* Device-wide (common) registers dumped for HIP08 (offsets from BAR2 base). */
static const struct debugfs_reg32 hisi_dma_hip08_comm_regs[] = {
	{"DMA_ECC_ERR_ADDR                  ", 0x2004ull},
	{"DMA_ECC_ECC_CNT                   ", 0x2014ull},
	{"COMMON_AND_CH_ERR_STS             ", 0x2030ull},
	{"LOCAL_CPL_ID_STS_0                ", 0x20E0ull},
	{"LOCAL_CPL_ID_STS_1                ", 0x20E4ull},
	{"LOCAL_CPL_ID_STS_2                ", 0x20E8ull},
	{"LOCAL_CPL_ID_STS_3                ", 0x20ECull},
	{"LOCAL_TLP_NUM                     ", 0x2158ull},
	{"SQCQ_TLP_NUM                      ", 0x2164ull},
	{"CPL_NUM                           ", 0x2168ull},
	{"INF_BACK_PRESS_STS                ", 0x2170ull},
	{"DMA_CH_RAS_LEVEL                  ", 0x2184ull},
	{"DMA_CM_RAS_LEVEL                  ", 0x2188ull},
	{"DMA_CH_ERR_STS                    ", 0x2190ull},
	{"DMA_CH_DONE_STS                   ", 0x2194ull},
	{"DMA_SQ_TAG_STS_0                  ", 0x21A0ull},
	{"DMA_SQ_TAG_STS_1                  ", 0x21A4ull},
	{"DMA_SQ_TAG_STS_2                  ", 0x21A8ull},
	{"DMA_SQ_TAG_STS_3                  ", 0x21ACull},
	{"LOCAL_P_ID_STS_0                  ", 0x21B0ull},
	{"LOCAL_P_ID_STS_1                  ", 0x21B4ull},
	{"LOCAL_P_ID_STS_2                  ", 0x21B8ull},
	{"LOCAL_P_ID_STS_3                  ", 0x21BCull},
	{"DMA_PREBUFF_INFO_0                ", 0x2200ull},
	{"DMA_CM_TABLE_INFO_0               ", 0x2220ull},
	{"DMA_CM_CE_RO                      ", 0x2244ull},
	{"DMA_CM_NFE_RO                     ", 0x2248ull},
	{"DMA_CM_FE_RO                      ", 0x224Cull},
};
246 
/* Device-wide (common) registers dumped for HIP09 (offsets from BAR2 base). */
static const struct debugfs_reg32 hisi_dma_hip09_comm_regs[] = {
	{"COMMON_AND_CH_ERR_STS             ", 0x0030ull},
	{"DMA_PORT_IDLE_STS                 ", 0x0150ull},
	{"DMA_CH_RAS_LEVEL                  ", 0x0184ull},
	{"DMA_CM_RAS_LEVEL                  ", 0x0188ull},
	{"DMA_CM_CE_RO                      ", 0x0244ull},
	{"DMA_CM_NFE_RO                     ", 0x0248ull},
	{"DMA_CM_FE_RO                      ", 0x024Cull},
	{"DFX_INF_BACK_PRESS_STS0           ", 0x1A40ull},
	{"DFX_INF_BACK_PRESS_STS1           ", 0x1A44ull},
	{"DFX_INF_BACK_PRESS_STS2           ", 0x1A48ull},
	{"DFX_DMA_WRR_DISABLE               ", 0x1A4Cull},
	{"DFX_PA_REQ_TLP_NUM                ", 0x1C00ull},
	{"DFX_PA_BACK_TLP_NUM               ", 0x1C04ull},
	{"DFX_PA_RETRY_TLP_NUM              ", 0x1C08ull},
	{"DFX_LOCAL_NP_TLP_NUM              ", 0x1C0Cull},
	{"DFX_LOCAL_CPL_HEAD_TLP_NUM        ", 0x1C10ull},
	{"DFX_LOCAL_CPL_DATA_TLP_NUM        ", 0x1C14ull},
	{"DFX_LOCAL_CPL_EXT_DATA_TLP_NUM    ", 0x1C18ull},
	{"DFX_LOCAL_P_HEAD_TLP_NUM          ", 0x1C1Cull},
	{"DFX_LOCAL_P_ACK_TLP_NUM           ", 0x1C20ull},
	{"DFX_BUF_ALOC_PORT_REQ_NUM         ", 0x1C24ull},
	{"DFX_BUF_ALOC_PORT_RESULT_NUM      ", 0x1C28ull},
	{"DFX_BUF_FAIL_SIZE_NUM             ", 0x1C2Cull},
	{"DFX_BUF_ALOC_SIZE_NUM             ", 0x1C30ull},
	{"DFX_BUF_NP_RELEASE_SIZE_NUM       ", 0x1C34ull},
	{"DFX_BUF_P_RELEASE_SIZE_NUM        ", 0x1C38ull},
	{"DFX_BUF_PORT_RELEASE_SIZE_NUM     ", 0x1C3Cull},
	{"DFX_DMA_PREBUF_MEM0_ECC_ERR_ADDR  ", 0x1CA8ull},
	{"DFX_DMA_PREBUF_MEM0_ECC_CNT       ", 0x1CACull},
	{"DFX_DMA_LOC_NP_OSTB_ECC_ERR_ADDR  ", 0x1CB0ull},
	{"DFX_DMA_LOC_NP_OSTB_ECC_CNT       ", 0x1CB4ull},
	{"DFX_DMA_PREBUF_MEM1_ECC_ERR_ADDR  ", 0x1CC0ull},
	{"DFX_DMA_PREBUF_MEM1_ECC_CNT       ", 0x1CC4ull},
	{"DMA_CH_DONE_STS                   ", 0x02E0ull},
	{"DMA_CH_ERR_STS                    ", 0x0320ull},
};
284 #endif /* CONFIG_DEBUG_FS*/
285 
286 static enum hisi_dma_reg_layout hisi_dma_get_reg_layout(struct pci_dev *pdev)
287 {
288 	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
289 		return HISI_DMA_REG_LAYOUT_HIP08;
290 	else if (pdev->revision >= HISI_DMA_REVISION_HIP09A)
291 		return HISI_DMA_REG_LAYOUT_HIP09;
292 
293 	return HISI_DMA_REG_LAYOUT_INVALID;
294 }
295 
296 static u32 hisi_dma_get_chan_num(struct pci_dev *pdev)
297 {
298 	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
299 		return HISI_DMA_HIP08_CHAN_NUM;
300 
301 	return HISI_DMA_HIP09_CHAN_NUM;
302 }
303 
304 static u32 hisi_dma_get_msi_num(struct pci_dev *pdev)
305 {
306 	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
307 		return HISI_DMA_HIP08_MSI_NUM;
308 
309 	return HISI_DMA_HIP09_MSI_NUM;
310 }
311 
312 static u32 hisi_dma_get_queue_base(struct pci_dev *pdev)
313 {
314 	if (pdev->revision == HISI_DMA_REVISION_HIP08B)
315 		return HISI_DMA_HIP08_Q_BASE;
316 
317 	return HISI_DMA_HIP09_Q_BASE;
318 }
319 
/* Convert a generic dma_chan back to its enclosing hisi_dma_chan. */
static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct hisi_dma_chan, vc.chan);
}
324 
/* Convert a virt-dma descriptor back to its enclosing hisi_dma_desc. */
static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct hisi_dma_desc, vd);
}
329 
/*
 * Write @val to per-queue register @reg of queue @index; queue register
 * windows are HISI_DMA_Q_OFFSET bytes apart.
 */
static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
				       u32 val)
{
	writel_relaxed(val, base + reg + index * HISI_DMA_Q_OFFSET);
}
335 
336 static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
337 {
338 	u32 tmp;
339 
340 	tmp = readl_relaxed(addr);
341 	tmp = val ? tmp | pos : tmp & ~pos;
342 	writel_relaxed(tmp, addr);
343 }
344 
345 static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
346 			       bool pause)
347 {
348 	void __iomem *addr;
349 
350 	addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
351 	       index * HISI_DMA_Q_OFFSET;
352 	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_PAUSE, pause);
353 }
354 
355 static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
356 				bool enable)
357 {
358 	void __iomem *addr;
359 
360 	addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
361 	       index * HISI_DMA_Q_OFFSET;
362 	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_EN, enable);
363 }
364 
365 static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
366 {
367 	void __iomem *q_base = hdma_dev->queue_base;
368 
369 	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
370 		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
371 				    qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
372 	else {
373 		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
374 				    qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
375 		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
376 				    qp_index,
377 				    HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
378 	}
379 }
380 
/*
 * Enable the interrupts of queue @qp_index. Stale status bits are
 * cleared (write-1-to-clear) before the mask registers are zeroed, so no
 * spurious interrupt fires the moment the queue is unmasked.
 */
static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	void __iomem *q_base = hdma_dev->queue_base;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_STS,
				    qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
				    qp_index, 0);
	} else {
		/* HIP09 has distinct completion and error status/mask pairs */
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_STS,
				    qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_STS,
				    qp_index,
				    HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
				    qp_index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
				    qp_index, 0);
	}
}
402 
403 static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
404 {
405 	void __iomem *addr;
406 
407 	addr = hdma_dev->queue_base +
408 	       HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
409 	hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL1_QUEUE_RESET, 1);
410 }
411 
412 static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
413 {
414 	void __iomem *q_base = hdma_dev->queue_base;
415 
416 	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
417 	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
418 }
419 
/*
 * Quiesce and reset one hardware channel.
 *
 * Sequence: pause and disable the queue, mask its interrupts, wait for
 * the queue FSM to leave RUN, reset the queue and rewind both ring
 * pointers, then un-pause. When @disable is false the queue is
 * re-enabled and its interrupts unmasked; either way the FSM is expected
 * to settle in IDLE afterwards.
 */
static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
					      bool disable)
{
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	u32 index = chan->qp_num, tmp;
	void __iomem *addr;
	int ret;

	hisi_dma_pause_dma(hdma_dev, index, true);
	hisi_dma_enable_dma(hdma_dev, index, false);
	hisi_dma_mask_irq(hdma_dev, index);

	addr = hdma_dev->queue_base +
	       HISI_DMA_Q_FSM_STS + index * HISI_DMA_Q_OFFSET;

	/* wait for the FSM to stop running before touching the reset bit */
	ret = readl_relaxed_poll_timeout(addr, tmp,
		FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) != RUN,
		HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
		WARN_ON(1);
	}

	hisi_dma_do_reset(hdma_dev, index);
	hisi_dma_reset_qp_point(hdma_dev, index);
	hisi_dma_pause_dma(hdma_dev, index, false);

	if (!disable) {
		hisi_dma_enable_dma(hdma_dev, index, true);
		hisi_dma_unmask_irq(hdma_dev, index);
	}

	/* after reset the queue should report IDLE */
	ret = readl_relaxed_poll_timeout(addr, tmp,
		FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) == IDLE,
		HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
		WARN_ON(1);
	}
}
460 
/*
 * dmaengine callback: tear down a channel. The hardware queue is reset
 * but left usable (disable=false), pending virt-dma descriptors are
 * freed, and the (devm-managed, so not freed here) SQ/CQ rings are
 * zeroed for the next user.
 */
static void hisi_dma_free_chan_resources(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;

	hisi_dma_reset_or_disable_hw_chan(chan, false);
	vchan_free_chan_resources(&chan->vc);

	memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
	memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
	chan->sq_tail = 0;
	chan->cq_head = 0;
	chan->status = DISABLE;
}
475 
/* virt-dma callback: release a completed or terminated descriptor. */
static void hisi_dma_desc_free(struct virt_dma_desc *vd)
{
	struct hisi_dma_desc *desc = to_hisi_dma_desc(vd);

	kfree(desc);
}
480 
481 static struct dma_async_tx_descriptor *
482 hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
483 			 size_t len, unsigned long flags)
484 {
485 	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
486 	struct hisi_dma_desc *desc;
487 
488 	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
489 	if (!desc)
490 		return NULL;
491 
492 	desc->sqe.length = cpu_to_le32(len);
493 	desc->sqe.src_addr = cpu_to_le64(src);
494 	desc->sqe.dst_addr = cpu_to_le64(dst);
495 
496 	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
497 }
498 
/*
 * dmaengine callback: report transfer status from the cookie bookkeeping
 * alone; no hardware residue information is available.
 */
static enum dma_status
hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	return dma_cookie_status(c, cookie, txstate);
}
505 
/*
 * Pop the next issued descriptor and push its SQE to hardware.
 * Caller must hold chan->vc.lock. Sets chan->desc to the in-flight
 * descriptor, or to NULL when nothing is pending.
 */
static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
{
	struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		chan->desc = NULL;
		return;
	}
	list_del(&vd->node);
	desc = to_hisi_dma_desc(vd);
	chan->desc = desc;

	memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));

	/* update other field in sqe */
	sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
	sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);

	/* make sure data has been updated in sqe before the doorbell write */
	wmb();

	/* update sq tail, point to new sqe position */
	chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;

	/* update sq_tail to trigger a new task */
	hisi_dma_chan_write(hdma_dev->queue_base, HISI_DMA_Q_SQ_TAIL_PTR,
			    chan->qp_num, chan->sq_tail);
}
538 
539 static void hisi_dma_issue_pending(struct dma_chan *c)
540 {
541 	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
542 	unsigned long flags;
543 
544 	spin_lock_irqsave(&chan->vc.lock, flags);
545 
546 	if (vchan_issue_pending(&chan->vc) && !chan->desc)
547 		hisi_dma_start_transfer(chan);
548 
549 	spin_unlock_irqrestore(&chan->vc.lock, flags);
550 }
551 
/*
 * dmaengine callback: abort all work on a channel. The queue is paused
 * while the in-flight and queued descriptors are detached under the
 * lock; the descriptors are then freed and the queue un-paused outside
 * the lock (vchan_dma_desc_free_list may sleep/reschedule work).
 */
static int hisi_dma_terminate_all(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vd);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);

	return 0;
}
575 
576 static void hisi_dma_synchronize(struct dma_chan *c)
577 {
578 	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
579 
580 	vchan_synchronize(&chan->vc);
581 }
582 
583 static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
584 {
585 	size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
586 	size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
587 	struct device *dev = &hdma_dev->pdev->dev;
588 	struct hisi_dma_chan *chan;
589 	int i;
590 
591 	for (i = 0; i < hdma_dev->chan_num; i++) {
592 		chan = &hdma_dev->chan[i];
593 		chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
594 					       GFP_KERNEL);
595 		if (!chan->sq)
596 			return -ENOMEM;
597 
598 		chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
599 					       GFP_KERNEL);
600 		if (!chan->cq)
601 			return -ENOMEM;
602 	}
603 
604 	return 0;
605 }
606 
/*
 * Program one hardware queue pair: ring base addresses, depth, pointer
 * reset, error-counter reset, and the layout-specific control bits.
 * The hardware depth registers take "entries - 1".
 */
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
	struct hisi_dma_chan *chan = &hdma_dev->chan[index];
	void __iomem *q_base = hdma_dev->queue_base;
	u32 hw_depth = hdma_dev->chan_depth - 1;
	void __iomem *addr;
	u32 tmp;

	/* set sq, cq base */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_L, index,
			    lower_32_bits(chan->sq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_H, index,
			    upper_32_bits(chan->sq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_L, index,
			    lower_32_bits(chan->cq_dma));
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_H, index,
			    upper_32_bits(chan->cq_dma));

	/* set sq, cq depth */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_DEPTH, index, hw_depth);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_DEPTH, index, hw_depth);

	/* init sq tail and cq head */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);

	/* init error interrupt stats */
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM0, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM1, index, 0);
	hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM2, index, 0);

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM3,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM4,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM5,
				    index, 0);
		hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM6,
				    index, 0);
		/*
		 * init SQ/CQ direction selecting register.
		 * "0" is to local side and "1" is to remote side.
		 */
		addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT, 0);

		/*
		 * 0 - Continue to next descriptor if error occurs.
		 * 1 - Abort the DMA queue if error occurs.
		 */
		hisi_dma_update_bit(addr,
				    HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN, 0);
	} else {
		addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;

		/*
		 * init SQ/CQ direction selecting register.
		 * "0" is to local side and "1" is to remote side.
		 */
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT, 0);
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT, 0);

		/*
		 * 0 - Continue to next descriptor if error occurs.
		 * 1 - Abort the DMA queue if error occurs.
		 * (multi-bit field on HIP09, so open-coded RMW instead of
		 * hisi_dma_update_bit)
		 */
		tmp = readl_relaxed(addr);
		tmp &= ~HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN;
		writel_relaxed(tmp, addr);

		/*
		 * 0 - dma should process FLR with CPU.
		 * 1 - dma not process FLR, only cpu process FLR.
		 */
		addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
		       index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_DMA_FLR_DISABLE_B, 0);

		addr = q_base + HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
		hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE, 1);
	}
}
691 
/*
 * Bring one queue pair online: program its registers, then unmask its
 * interrupts before enabling the queue.
 */
static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_init_hw_qp(hdma_dev, qp_index);
	hisi_dma_unmask_irq(hdma_dev, qp_index);
	hisi_dma_enable_dma(hdma_dev, qp_index, true);
}
698 
699 static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
700 {
701 	hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
702 }
703 
704 static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
705 {
706 	int i;
707 
708 	for (i = 0; i < hdma_dev->chan_num; i++) {
709 		hdma_dev->chan[i].qp_num = i;
710 		hdma_dev->chan[i].hdma_dev = hdma_dev;
711 		hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
712 		vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
713 		hisi_dma_enable_qp(hdma_dev, i);
714 	}
715 }
716 
717 static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
718 {
719 	int i;
720 
721 	for (i = 0; i < hdma_dev->chan_num; i++) {
722 		hisi_dma_disable_qp(hdma_dev, i);
723 		tasklet_kill(&hdma_dev->chan[i].vc.task);
724 	}
725 }
726 
727 static irqreturn_t hisi_dma_irq(int irq, void *data)
728 {
729 	struct hisi_dma_chan *chan = data;
730 	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
731 	struct hisi_dma_desc *desc;
732 	struct hisi_dma_cqe *cqe;
733 	void __iomem *q_base;
734 
735 	spin_lock(&chan->vc.lock);
736 
737 	desc = chan->desc;
738 	cqe = chan->cq + chan->cq_head;
739 	q_base = hdma_dev->queue_base;
740 	if (desc) {
741 		chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
742 		hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR,
743 				    chan->qp_num, chan->cq_head);
744 		if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
745 			vchan_cookie_complete(&desc->vd);
746 			hisi_dma_start_transfer(chan);
747 		} else {
748 			dev_err(&hdma_dev->pdev->dev, "task error!\n");
749 		}
750 	}
751 
752 	spin_unlock(&chan->vc.lock);
753 
754 	return IRQ_HANDLED;
755 }
756 
757 static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
758 {
759 	struct pci_dev *pdev = hdma_dev->pdev;
760 	int i, ret;
761 
762 	for (i = 0; i < hdma_dev->chan_num; i++) {
763 		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
764 				       hisi_dma_irq, IRQF_SHARED, "hisi_dma",
765 				       &hdma_dev->chan[i]);
766 		if (ret)
767 			return ret;
768 	}
769 
770 	return 0;
771 }
772 
773 /* This function enables all hw channels in a device */
774 static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
775 {
776 	int ret;
777 
778 	ret = hisi_dma_alloc_qps_mem(hdma_dev);
779 	if (ret) {
780 		dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
781 		return ret;
782 	}
783 
784 	ret = hisi_dma_request_qps_irq(hdma_dev);
785 	if (ret) {
786 		dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
787 		return ret;
788 	}
789 
790 	hisi_dma_enable_qps(hdma_dev);
791 
792 	return 0;
793 }
794 
/* devm action callback: quiesce every hardware channel on teardown. */
static void hisi_dma_disable_hw_channels(void *data)
{
	struct hisi_dma_dev *hdma_dev = data;

	hisi_dma_disable_qps(hdma_dev);
}
799 
800 static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
801 			      enum hisi_dma_mode mode)
802 {
803 	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
804 		writel_relaxed(mode == RC ? 1 : 0,
805 			       hdma_dev->base + HISI_DMA_HIP08_MODE);
806 }
807 
808 static void hisi_dma_init_hw(struct hisi_dma_dev *hdma_dev)
809 {
810 	void __iomem *addr;
811 	int i;
812 
813 	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
814 		for (i = 0; i < HISI_DMA_HIP09_MAX_PORT_NUM; i++) {
815 			addr = hdma_dev->base + HISI_DMA_HIP09_PORT_CFG_REG(i);
816 			hisi_dma_update_bit(addr,
817 				HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B, 1);
818 		}
819 	}
820 }
821 
822 static void hisi_dma_init_dma_dev(struct hisi_dma_dev *hdma_dev)
823 {
824 	struct dma_device *dma_dev;
825 
826 	dma_dev = &hdma_dev->dma_dev;
827 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
828 	dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
829 	dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
830 	dma_dev->device_tx_status = hisi_dma_tx_status;
831 	dma_dev->device_issue_pending = hisi_dma_issue_pending;
832 	dma_dev->device_terminate_all = hisi_dma_terminate_all;
833 	dma_dev->device_synchronize = hisi_dma_synchronize;
834 	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
835 	dma_dev->dev = &hdma_dev->pdev->dev;
836 	INIT_LIST_HEAD(&dma_dev->channels);
837 }
838 
839 /* --- debugfs implementation --- */
840 #ifdef CONFIG_DEBUG_FS
841 #include <linux/debugfs.h>
842 static struct debugfs_reg32 *hisi_dma_get_ch_regs(struct hisi_dma_dev *hdma_dev,
843 						  u32 *regs_sz)
844 {
845 	struct device *dev = &hdma_dev->pdev->dev;
846 	struct debugfs_reg32 *regs;
847 	u32 regs_sz_comm;
848 
849 	regs_sz_comm = ARRAY_SIZE(hisi_dma_comm_chan_regs);
850 
851 	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
852 		*regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip08_chan_regs);
853 	else
854 		*regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip09_chan_regs);
855 
856 	regs = devm_kcalloc(dev, *regs_sz, sizeof(struct debugfs_reg32),
857 			    GFP_KERNEL);
858 	if (!regs)
859 		return NULL;
860 	memcpy(regs, hisi_dma_comm_chan_regs, sizeof(hisi_dma_comm_chan_regs));
861 
862 	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
863 		memcpy(regs + regs_sz_comm, hisi_dma_hip08_chan_regs,
864 		       sizeof(hisi_dma_hip08_chan_regs));
865 	else
866 		memcpy(regs + regs_sz_comm, hisi_dma_hip09_chan_regs,
867 		       sizeof(hisi_dma_hip09_chan_regs));
868 
869 	return regs;
870 }
871 
872 static int hisi_dma_create_chan_dir(struct hisi_dma_dev *hdma_dev)
873 {
874 	char dir_name[HISI_DMA_MAX_DIR_NAME_LEN];
875 	struct debugfs_regset32 *regsets;
876 	struct debugfs_reg32 *regs;
877 	struct dentry *chan_dir;
878 	struct device *dev;
879 	u32 regs_sz;
880 	int ret;
881 	int i;
882 
883 	dev = &hdma_dev->pdev->dev;
884 
885 	regsets = devm_kcalloc(dev, hdma_dev->chan_num,
886 			       sizeof(*regsets), GFP_KERNEL);
887 	if (!regsets)
888 		return -ENOMEM;
889 
890 	regs = hisi_dma_get_ch_regs(hdma_dev, &regs_sz);
891 	if (!regs)
892 		return -ENOMEM;
893 
894 	for (i = 0; i < hdma_dev->chan_num; i++) {
895 		regsets[i].regs = regs;
896 		regsets[i].nregs = regs_sz;
897 		regsets[i].base = hdma_dev->queue_base + i * HISI_DMA_Q_OFFSET;
898 		regsets[i].dev = dev;
899 
900 		memset(dir_name, 0, HISI_DMA_MAX_DIR_NAME_LEN);
901 		ret = sprintf(dir_name, "channel%d", i);
902 		if (ret < 0)
903 			return ret;
904 
905 		chan_dir = debugfs_create_dir(dir_name,
906 					      hdma_dev->dma_dev.dbg_dev_root);
907 		debugfs_create_regset32("regs", 0444, chan_dir, &regsets[i]);
908 	}
909 
910 	return 0;
911 }
912 
/*
 * Populate the dmaengine-provided debugfs directory with a device-wide
 * "regs" dump plus one directory per channel. Failures are non-fatal:
 * debugfs is best-effort, so errors are only logged.
 */
static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev)
{
	struct debugfs_regset32 *regset;
	struct device *dev;
	int ret;

	dev = &hdma_dev->pdev->dev;

	/* dmaengine did not create a debugfs root for this device */
	if (hdma_dev->dma_dev.dbg_dev_root == NULL)
		return;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
		regset->regs = hisi_dma_hip08_comm_regs;
		regset->nregs = ARRAY_SIZE(hisi_dma_hip08_comm_regs);
	} else {
		regset->regs = hisi_dma_hip09_comm_regs;
		regset->nregs = ARRAY_SIZE(hisi_dma_hip09_comm_regs);
	}
	regset->base = hdma_dev->base;
	regset->dev = dev;

	debugfs_create_regset32("regs", 0444,
				hdma_dev->dma_dev.dbg_dev_root, regset);

	ret = hisi_dma_create_chan_dir(hdma_dev);
	if (ret < 0)
		dev_info(&hdma_dev->pdev->dev, "fail to create debugfs for channels!\n");
}
945 #else
/* Stub when CONFIG_DEBUG_FS is disabled. */
static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev) { }
947 #endif /* CONFIG_DEBUG_FS*/
948 /* --- debugfs implementation --- */
949 
950 static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
951 {
952 	enum hisi_dma_reg_layout reg_layout;
953 	struct device *dev = &pdev->dev;
954 	struct hisi_dma_dev *hdma_dev;
955 	struct dma_device *dma_dev;
956 	u32 chan_num;
957 	u32 msi_num;
958 	int ret;
959 
960 	reg_layout = hisi_dma_get_reg_layout(pdev);
961 	if (reg_layout == HISI_DMA_REG_LAYOUT_INVALID) {
962 		dev_err(dev, "unsupported device!\n");
963 		return -EINVAL;
964 	}
965 
966 	ret = pcim_enable_device(pdev);
967 	if (ret) {
968 		dev_err(dev, "failed to enable device mem!\n");
969 		return ret;
970 	}
971 
972 	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
973 	if (ret) {
974 		dev_err(dev, "failed to remap I/O region!\n");
975 		return ret;
976 	}
977 
978 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
979 	if (ret)
980 		return ret;
981 
982 	chan_num = hisi_dma_get_chan_num(pdev);
983 	hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, chan_num),
984 				GFP_KERNEL);
985 	if (!hdma_dev)
986 		return -EINVAL;
987 
988 	hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
989 	hdma_dev->pdev = pdev;
990 	hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
991 	hdma_dev->chan_num = chan_num;
992 	hdma_dev->reg_layout = reg_layout;
993 	hdma_dev->queue_base = hdma_dev->base + hisi_dma_get_queue_base(pdev);
994 
995 	pci_set_drvdata(pdev, hdma_dev);
996 	pci_set_master(pdev);
997 
998 	msi_num = hisi_dma_get_msi_num(pdev);
999 
1000 	/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
1001 	ret = pci_alloc_irq_vectors(pdev, msi_num, msi_num, PCI_IRQ_MSI);
1002 	if (ret < 0) {
1003 		dev_err(dev, "Failed to allocate MSI vectors!\n");
1004 		return ret;
1005 	}
1006 
1007 	hisi_dma_init_dma_dev(hdma_dev);
1008 
1009 	hisi_dma_set_mode(hdma_dev, RC);
1010 
1011 	hisi_dma_init_hw(hdma_dev);
1012 
1013 	ret = hisi_dma_enable_hw_channels(hdma_dev);
1014 	if (ret < 0) {
1015 		dev_err(dev, "failed to enable hw channel!\n");
1016 		return ret;
1017 	}
1018 
1019 	ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
1020 				       hdma_dev);
1021 	if (ret)
1022 		return ret;
1023 
1024 	dma_dev = &hdma_dev->dma_dev;
1025 	ret = dmaenginem_async_device_register(dma_dev);
1026 	if (ret < 0) {
1027 		dev_err(dev, "failed to register device!\n");
1028 		return ret;
1029 	}
1030 
1031 	hisi_dma_create_debugfs(hdma_dev);
1032 
1033 	return 0;
1034 }
1035 
/* 0xa122 is shared by HIP08B and HIP09A; they are told apart by revision. */
static const struct pci_device_id hisi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
	{ 0, }
};
1040 
/* No .remove callback: all probe resources are devm/pcim managed. */
static struct pci_driver hisi_dma_pci_driver = {
	.name		= "hisi_dma",
	.id_table	= hisi_dma_pci_tbl,
	.probe		= hisi_dma_probe,
};
1046 
1047 module_pci_driver(hisi_dma_pci_driver);
1048 
1049 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
1050 MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
1051 MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
1052 MODULE_LICENSE("GPL v2");
1053 MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);
1054