/linux/drivers/dma/amd/qdma/qdma.c (revision 23db0ed34f9e3756d243c5dc56d9f7c1fadecf89)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * DMA driver for AMD Queue-based DMA Subsystem
4  *
5  * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
6  */
7 #include <linux/bitfield.h>
8 #include <linux/bitops.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/module.h>
12 #include <linux/mod_devicetable.h>
13 #include <linux/platform_device.h>
14 #include <linux/platform_data/amd_qdma.h>
15 #include <linux/regmap.h>
16 
17 #include "qdma.h"
18 
19 #define CHAN_STR(q)		(((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
20 #define QDMA_REG_OFF(d, r)	((d)->roffs[r].off)
21 
22 /* MMIO regmap config for all QDMA registers */
23 static const struct regmap_config qdma_regmap_config = {
24 	.reg_bits = 32,
25 	.val_bits = 32,
26 	.reg_stride = 4,
27 };
28 
29 static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
30 {
31 	return container_of(chan, struct qdma_queue, vchan.chan);
32 }
33 
34 static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
35 {
36 	return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
37 }
38 
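/*
 * Hand out interrupt aggregation rings in round-robin order; the returned
 * ring index is later programmed into a queue's software descriptor context
 * so completions for that queue are reported through that ring.
 */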
39 static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
40 {
41 	u32 idx;
42 
43 	idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
44 	qdev->qintr_ring_idx %= qdev->qintr_ring_num;
45 
46 	return idx;
47 }
48 
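/*
 * Extract a register field from an array of 32-bit context words. A field
 * may span up to three consecutive words; the three branches below handle
 * fields contained in one word, straddling two words, and straddling three
 * words respectively.
 */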
49 static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
50 			  enum qdma_reg_fields field)
51 {
52 	const struct qdma_reg_field *f = &qdev->rfields[field];
53 	u16 low_pos, hi_pos, low_bit, hi_bit;
54 	u64 value = 0, mask;
55 
56 	low_pos = f->lsb / BITS_PER_TYPE(*data);
57 	hi_pos = f->msb / BITS_PER_TYPE(*data);
58 
59 	if (low_pos == hi_pos) {
60 		low_bit = f->lsb % BITS_PER_TYPE(*data);
61 		hi_bit = f->msb % BITS_PER_TYPE(*data);
62 		mask = GENMASK(hi_bit, low_bit);
63 		value = (data[low_pos] & mask) >> low_bit;
64 	} else if (hi_pos == low_pos + 1) {
65 		low_bit = f->lsb % BITS_PER_TYPE(*data);
66 		hi_bit = low_bit + (f->msb - f->lsb);
67 		value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
68 			data[low_pos];
69 		mask = GENMASK_ULL(hi_bit, low_bit);
70 		value = (value & mask) >> low_bit;
71 	} else {
72 		hi_bit = f->msb % BITS_PER_TYPE(*data);
73 		mask = GENMASK(hi_bit, 0);
74 		value = data[hi_pos] & mask;
75 		low_bit = f->msb - f->lsb - hi_bit;
76 		value <<= low_bit;
77 		low_bit -= 32;
78 		value |= (u64)data[hi_pos - 1] << low_bit;
79 		mask = GENMASK(31, 32 - low_bit);
80 		value |= (data[hi_pos - 2] & mask) >> low_bit;
81 	}
82 
83 	return value;
84 }
85 
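/*
 * OR a field value into an array of 32-bit context words, starting at the
 * word holding the field's LSB and spilling into up to two following words.
 * Callers are expected to have zeroed the destination buffer first.
 */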
86 static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
87 			   enum qdma_reg_fields field, u64 value)
88 {
89 	const struct qdma_reg_field *f = &qdev->rfields[field];
90 	u16 low_pos, hi_pos, low_bit;
91 
92 	low_pos = f->lsb / BITS_PER_TYPE(*data);
93 	hi_pos = f->msb / BITS_PER_TYPE(*data);
94 	low_bit = f->lsb % BITS_PER_TYPE(*data);
95 
96 	data[low_pos++] |= value << low_bit;
97 	if (low_pos <= hi_pos)
98 		data[low_pos++] |= (u32)(value >> (32 - low_bit));
99 	if (low_pos <= hi_pos)
100 		data[low_pos] |= (u32)(value >> (64 - low_bit));
101 }
102 
103 static inline int qdma_reg_write(const struct qdma_device *qdev,
104 				 const u32 *data, enum qdma_regs reg)
105 {
106 	const struct qdma_reg *r = &qdev->roffs[reg];
107 	int ret;
108 
109 	if (r->count > 1)
110 		ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
111 	else
112 		ret = regmap_write(qdev->regmap, r->off, *data);
113 
114 	return ret;
115 }
116 
117 static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
118 				enum qdma_regs reg)
119 {
120 	const struct qdma_reg *r = &qdev->roffs[reg];
121 	int ret;
122 
123 	if (r->count > 1)
124 		ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
125 	else
126 		ret = regmap_read(qdev->regmap, r->off, data);
127 
128 	return ret;
129 }
130 
131 static int qdma_context_cmd_execute(const struct qdma_device *qdev,
132 				    enum qdma_ctxt_type type,
133 				    enum qdma_ctxt_cmd cmd, u16 index)
134 {
135 	u32 value = 0;
136 	int ret;
137 
138 	qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
139 	qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
140 	qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);
141 
142 	ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
143 	if (ret)
144 		return ret;
145 
146 	ret = regmap_read_poll_timeout(qdev->regmap,
147 				       QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
148 				       value,
149 				       !qdma_get_field(qdev, &value,
150 						       QDMA_REGF_CMD_BUSY),
151 				       QDMA_POLL_INTRVL_US,
152 				       QDMA_POLL_TIMEOUT_US);
153 	if (ret) {
154 		qdma_err(qdev, "Context command execution timed out");
155 		return ret;
156 	}
157 
158 	return 0;
159 }
160 
161 static int qdma_context_write_data(const struct qdma_device *qdev,
162 				   const u32 *data)
163 {
164 	u32 mask[QDMA_CTXT_REGMAP_LEN];
165 	int ret;
166 
167 	memset(mask, ~0, sizeof(mask));
168 
169 	ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
170 	if (ret)
171 		return ret;
172 
173 	ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
174 	if (ret)
175 		return ret;
176 
177 	return 0;
178 }
179 
180 static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
181 				      const struct qdma_ctxt_sw_desc *ctxt,
182 				      u32 *data)
183 {
184 	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
185 	qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
186 	qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
187 	qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);
188 
189 	qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
190 	qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
191 	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
192 	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
193 	qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
194 	qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
195 	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
196 	qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
197 	qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
198 	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
199 	qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
200 }
201 
202 static void qdma_prep_intr_context(const struct qdma_device *qdev,
203 				   const struct qdma_ctxt_intr *ctxt,
204 				   u32 *data)
205 {
206 	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
207 	qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
208 	qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
209 	qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
210 	qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
211 	qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
212 	qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
213 }
214 
215 static void qdma_prep_fmap_context(const struct qdma_device *qdev,
216 				   const struct qdma_ctxt_fmap *ctxt,
217 				   u32 *data)
218 {
219 	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
220 	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
221 	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
222 }
223 
224 /*
225  * Program the indirect context register space
226  *
227  * Once the queue is enabled, context is dynamically updated by hardware. Any
228  * modification of the context through this API when the queue is enabled can
229  * result in unexpected behavior. Reading the context when the queue is enabled
230  * is not recommended as it can result in reduced performance.
231  */
232 static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
233 			     enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
234 {
235 	int ret;
236 
237 	mutex_lock(&qdev->ctxt_lock);
238 	if (cmd == QDMA_CTXT_WRITE) {
239 		ret = qdma_context_write_data(qdev, ctxt);
240 		if (ret)
241 			goto failed;
242 	}
243 
244 	ret = qdma_context_cmd_execute(qdev, type, cmd, index);
245 	if (ret)
246 		goto failed;
247 
248 	if (cmd == QDMA_CTXT_READ) {
249 		ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
250 		if (ret)
251 			goto failed;
252 	}
253 
254 failed:
255 	mutex_unlock(&qdev->ctxt_lock);
256 
257 	return ret;
258 }
259 
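/*
 * Read back the software descriptor context for the given direction/queue
 * and refuse to reuse a queue that the hardware still reports as enabled.
 */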
260 static int qdma_check_queue_status(struct qdma_device *qdev,
261 				   enum dma_transfer_direction dir, u16 qid)
262 {
263 	u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
264 	enum qdma_ctxt_type type;
265 	int ret;
266 
267 	if (dir == DMA_MEM_TO_DEV)
268 		type = QDMA_CTXT_DESC_SW_H2C;
269 	else
270 		type = QDMA_CTXT_DESC_SW_C2H;
271 	ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
272 	if (ret)
273 		return ret;
274 
275 	status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
276 	if (status) {
277 		qdma_err(qdev, "queue %d already in use", qid);
278 		return -EBUSY;
279 	}
280 
281 	return 0;
282 }
283 
284 static int qdma_clear_queue_context(const struct qdma_queue *queue)
285 {
286 	enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
287 					    QDMA_CTXT_DESC_HW_H2C,
288 					    QDMA_CTXT_DESC_CR_H2C,
289 					    QDMA_CTXT_PFTCH, };
290 	enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
291 					    QDMA_CTXT_DESC_HW_C2H,
292 					    QDMA_CTXT_DESC_CR_C2H,
293 					    QDMA_CTXT_PFTCH, };
294 	struct qdma_device *qdev = queue->qdev;
295 	enum qdma_ctxt_type *type;
296 	int ret, num, i;
297 
298 	if (queue->dir == DMA_MEM_TO_DEV) {
299 		type = h2c_types;
300 		num = ARRAY_SIZE(h2c_types);
301 	} else {
302 		type = c2h_types;
303 		num = ARRAY_SIZE(c2h_types);
304 	}
305 	for (i = 0; i < num; i++) {
306 		ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
307 					queue->qid, NULL);
308 		if (ret) {
309 			qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
310 			return ret;
311 		}
312 	}
313 
314 	return 0;
315 }
316 
317 static int qdma_setup_fmap_context(struct qdma_device *qdev)
318 {
319 	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
320 	struct qdma_ctxt_fmap fmap;
321 	int ret;
322 
323 	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
324 				qdev->fid, NULL);
325 	if (ret) {
326 		qdma_err(qdev, "Failed clearing context");
327 		return ret;
328 	}
329 
330 	fmap.qbase = 0;
331 	fmap.qmax = qdev->chan_num * 2;
332 	qdma_prep_fmap_context(qdev, &fmap, ctxt);
333 	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
334 				qdev->fid, ctxt);
335 	if (ret)
336 		qdma_err(qdev, "Failed setup fmap, ret %d", ret);
337 
338 	return ret;
339 }
340 
341 static int qdma_setup_queue_context(struct qdma_device *qdev,
342 				    const struct qdma_ctxt_sw_desc *sw_desc,
343 				    enum dma_transfer_direction dir, u16 qid)
344 {
345 	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
346 	enum qdma_ctxt_type type;
347 	int ret;
348 
349 	if (dir == DMA_MEM_TO_DEV)
350 		type = QDMA_CTXT_DESC_SW_H2C;
351 	else
352 		type = QDMA_CTXT_DESC_SW_C2H;
353 
354 	qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
355 	/* Setup SW descriptor context */
356 	ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
357 	if (ret)
358 		qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);
359 
360 	return ret;
361 }
362 
363 /*
364  * Enable or disable memory-mapped DMA engines
365  * 1: enable, 0: disable
366  */
367 static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
368 {
369 	int ret;
370 
371 	ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
372 	ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);
373 
374 	return ret;
375 }
376 
377 static int qdma_get_hw_info(struct qdma_device *qdev)
378 {
379 	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
380 	u32 value = 0;
381 	int ret;
382 
383 	ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
384 	if (ret)
385 		return ret;
386 
387 	value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
388 	if (pdata->max_mm_channels * 2 > value) {
389 		qdma_err(qdev, "not enough hw queues %d", value);
390 		return -EINVAL;
391 	}
392 	qdev->chan_num = pdata->max_mm_channels;
393 
394 	ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
395 	if (ret)
396 		return ret;
397 
398 	qdma_info(qdev, "max channel %d, function id %d",
399 		  qdev->chan_num, qdev->fid);
400 
401 	return 0;
402 }
403 
404 static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
405 {
406 	struct qdma_device *qdev = queue->qdev;
407 
408 	return regmap_write(qdev->regmap, queue->pidx_reg,
409 			    pidx | QDMA_QUEUE_ARM_BIT);
410 }
411 
412 static inline int qdma_update_cidx(const struct qdma_queue *queue,
413 				   u16 ridx, u16 cidx)
414 {
415 	struct qdma_device *qdev = queue->qdev;
416 
417 	return regmap_write(qdev->regmap, queue->cidx_reg,
418 			    ((u32)ridx << 16) | cidx);
419 }
420 
421 /**
422  * qdma_free_vdesc() - Free descriptor
423  * @vdesc: Virtual DMA descriptor
424  */
425 static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
426 {
427 	struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);
428 
429 	kfree(vd);
430 }
431 
432 static int qdma_alloc_queues(struct qdma_device *qdev,
433 			     enum dma_transfer_direction dir)
434 {
435 	struct qdma_queue *q, **queues;
436 	u32 i, pidx_base;
437 	int ret;
438 
439 	if (dir == DMA_MEM_TO_DEV) {
440 		queues = &qdev->h2c_queues;
441 		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
442 	} else {
443 		queues = &qdev->c2h_queues;
444 		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
445 	}
446 
447 	*queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
448 			       GFP_KERNEL);
449 	if (!*queues)
450 		return -ENOMEM;
451 
452 	for (i = 0; i < qdev->chan_num; i++) {
453 		ret = qdma_check_queue_status(qdev, dir, i);
454 		if (ret)
455 			return ret;
456 
457 		q = &(*queues)[i];
458 		q->ring_size = QDMA_DEFAULT_RING_SIZE;
459 		q->idx_mask = q->ring_size - 2;
460 		q->qdev = qdev;
461 		q->dir = dir;
462 		q->qid = i;
463 		q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
464 		q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
465 				i * QDMA_DMAP_REG_STRIDE;
466 		q->vchan.desc_free = qdma_free_vdesc;
467 		vchan_init(&q->vchan, &qdev->dma_dev);
468 	}
469 
470 	return 0;
471 }
472 
473 static int qdma_device_verify(struct qdma_device *qdev)
474 {
475 	u32 value;
476 	int ret;
477 
478 	ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
479 	if (ret)
480 		return ret;
481 
482 	value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
483 	if (value != QDMA_IDENTIFIER) {
484 		qdma_err(qdev, "Invalid identifier");
485 		return -ENODEV;
486 	}
487 	qdev->rfields = qdma_regfs_default;
488 	qdev->roffs = qdma_regos_default;
489 
490 	return 0;
491 }
492 
493 static int qdma_device_setup(struct qdma_device *qdev)
494 {
495 	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
496 	int ret = 0;
497 
498 	ret = qdma_setup_fmap_context(qdev);
499 	if (ret) {
500 		qdma_err(qdev, "Failed setup fmap context");
501 		return ret;
502 	}
503 
504 	/* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
505 	ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
506 	if (ret) {
507 		qdma_err(qdev, "Failed to setup ring %d of size %ld",
508 			 QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
509 		return ret;
510 	}
511 
512 	/* Enable memory-mapped DMA engine in both directions */
513 	ret = qdma_sgdma_control(qdev, 1);
514 	if (ret) {
515 		qdma_err(qdev, "Failed to SGDMA with error %d", ret);
516 		return ret;
517 	}
518 
519 	ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
520 	if (ret) {
521 		qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
522 		return ret;
523 	}
524 
525 	ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
526 	if (ret) {
527 		qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
528 		return ret;
529 	}
530 
531 	return 0;
532 }
533 
534 /**
535  * qdma_free_queue_resources() - Free queue resources
536  * @chan: DMA channel
537  */
538 static void qdma_free_queue_resources(struct dma_chan *chan)
539 {
540 	struct qdma_queue *queue = to_qdma_queue(chan);
541 	struct qdma_device *qdev = queue->qdev;
542 	struct qdma_platdata *pdata;
543 
544 	qdma_clear_queue_context(queue);
545 	vchan_free_chan_resources(&queue->vchan);
546 	pdata = dev_get_platdata(&qdev->pdev->dev);
547 	dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
548 			  queue->desc_base, queue->dma_desc_base);
549 }
550 
551 /**
552  * qdma_alloc_queue_resources() - Allocate queue resources
553  * @chan: DMA channel
554  */
555 static int qdma_alloc_queue_resources(struct dma_chan *chan)
556 {
557 	struct qdma_queue *queue = to_qdma_queue(chan);
558 	struct qdma_device *qdev = queue->qdev;
559 	struct qdma_ctxt_sw_desc desc;
560 	struct qdma_platdata *pdata;
561 	size_t size;
562 	int ret;
563 
564 	ret = qdma_clear_queue_context(queue);
565 	if (ret)
566 		return ret;
567 
568 	pdata = dev_get_platdata(&qdev->pdev->dev);
569 	size = queue->ring_size * QDMA_MM_DESC_SIZE;
570 	queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
571 					      &queue->dma_desc_base,
572 					      GFP_KERNEL);
573 	if (!queue->desc_base) {
574 		qdma_err(qdev, "Failed to allocate descriptor ring");
575 		return -ENOMEM;
576 	}
577 
578 	/* Setup SW descriptor queue context for DMA memory map */
579 	desc.vec = qdma_get_intr_ring_idx(qdev);
580 	desc.desc_base = queue->dma_desc_base;
581 	ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
582 	if (ret) {
583 		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
584 			 chan->name);
585 		dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
586 				  queue->dma_desc_base);
587 		return ret;
588 	}
589 
590 	queue->pidx = 0;
591 	queue->cidx = 0;
592 
593 	return 0;
594 }
595 
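/*
 * dmaengine filter callback: a channel matches when its direction equals
 * the direction in the struct qdma_queue_info passed as the filter
 * parameter (normally supplied through the dma_slave_map entries in
 * struct qdma_platdata). Illustrative consumer sketch only -- the slave
 * name below is hypothetical and must match an entry in that map, and
 * client_dev is the consumer's struct device:
 *
 *	chan = dma_request_chan(client_dev, "h2c-queue0");
 */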
596 static bool qdma_filter_fn(struct dma_chan *chan, void *param)
597 {
598 	struct qdma_queue *queue = to_qdma_queue(chan);
599 	struct qdma_queue_info *info = param;
600 
601 	return info->dir == queue->dir;
602 }
603 
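/*
 * Kick off the next transfer by writing the queue's producer index.
 * Called with the virtual channel lock held.
 */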
604 static int qdma_xfer_start(struct qdma_queue *queue)
605 {
606 	struct qdma_device *qdev = queue->qdev;
607 	int ret;
608 
609 	if (!vchan_next_desc(&queue->vchan))
610 		return 0;
611 
612 	qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d",
613 		 queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
614 
615 	ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
616 	if (ret) {
617 		qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
618 			 queue->pidx, CHAN_STR(queue), queue->qid);
619 	}
620 
621 	return ret;
622 }
623 
624 static void qdma_issue_pending(struct dma_chan *chan)
625 {
626 	struct qdma_queue *queue = to_qdma_queue(chan);
627 	unsigned long flags;
628 
629 	spin_lock_irqsave(&queue->vchan.lock, flags);
630 	if (vchan_issue_pending(&queue->vchan)) {
631 		if (queue->submitted_vdesc) {
632 			queue->issued_vdesc = queue->submitted_vdesc;
633 			queue->submitted_vdesc = NULL;
634 		}
635 		qdma_xfer_start(queue);
636 	}
637 
638 	spin_unlock_irqrestore(&queue->vchan.lock, flags);
639 }
640 
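/*
 * Grab the next free memory-mapped descriptor slot, or return NULL when
 * advancing the producer index would collide with the consumer index
 * (i.e. the ring is full).
 */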
641 static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
642 {
643 	struct qdma_mm_desc *desc;
644 
645 	if (((q->pidx + 1) & q->idx_mask) == q->cidx)
646 		return NULL;
647 
648 	desc = q->desc_base + q->pidx;
649 	q->pidx = (q->pidx + 1) & q->idx_mask;
650 
651 	return desc;
652 }
653 
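/*
 * Translate a virtual descriptor's scatterlist into hardware MM descriptors,
 * splitting entries larger than QDMA_MM_DESC_MAX_LEN. If the ring fills up,
 * progress (sg_len, sg_off, pidx) is recorded in the virtual descriptor and
 * -EBUSY is returned so enqueueing can resume later.
 */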
654 static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
655 {
656 	struct qdma_mm_desc *desc;
657 	struct scatterlist *sg;
658 	u64 addr, *src, *dst;
659 	u32 rest, len;
660 	int ret = 0;
661 	u32 i;
662 
663 	if (!vdesc->sg_len)
664 		return 0;
665 
666 	if (q->dir == DMA_MEM_TO_DEV) {
667 		dst = &vdesc->dev_addr;
668 		src = &addr;
669 	} else {
670 		dst = &addr;
671 		src = &vdesc->dev_addr;
672 	}
673 
674 	for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
675 		addr = sg_dma_address(sg) + vdesc->sg_off;
676 		rest = sg_dma_len(sg) - vdesc->sg_off;
677 		while (rest) {
678 			len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
679 			desc = qdma_get_desc(q);
680 			if (!desc) {
681 				ret = -EBUSY;
682 				goto out;
683 			}
684 
685 			desc->src_addr = cpu_to_le64(*src);
686 			desc->dst_addr = cpu_to_le64(*dst);
687 			desc->len = cpu_to_le32(len);
688 
689 			vdesc->dev_addr += len;
690 			vdesc->sg_off += len;
691 			vdesc->pending_descs++;
692 			addr += len;
693 			rest -= len;
694 		}
695 		vdesc->sg_off = 0;
696 	}
697 out:
698 	vdesc->sg_len -= i;
699 	vdesc->pidx = q->pidx;
700 	return ret;
701 }
702 
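/*
 * Push outstanding virtual descriptors into the hardware ring: first any
 * not-yet-enqueued descriptors on the issued list, then those on the
 * submitted list. The last descriptor reached is remembered so a later call
 * can resume where this one stopped. Called with the virtual channel lock
 * held.
 */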
703 static void qdma_fill_pending_vdesc(struct qdma_queue *q)
704 {
705 	struct virt_dma_chan *vc = &q->vchan;
706 	struct qdma_mm_vdesc *vdesc = NULL;
707 	struct virt_dma_desc *vd;
708 	int ret;
709 
710 	if (!list_empty(&vc->desc_issued)) {
711 		vd = &q->issued_vdesc->vdesc;
712 		list_for_each_entry_from(vd, &vc->desc_issued, node) {
713 			vdesc = to_qdma_vdesc(vd);
714 			ret = qdma_hw_enqueue(q, vdesc);
715 			if (ret) {
716 				q->issued_vdesc = vdesc;
717 				return;
718 			}
719 		}
720 		q->issued_vdesc = vdesc;
721 	}
722 
723 	if (list_empty(&vc->desc_submitted))
724 		return;
725 
726 	if (q->submitted_vdesc)
727 		vd = &q->submitted_vdesc->vdesc;
728 	else
729 		vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
730 
731 	list_for_each_entry_from(vd, &vc->desc_submitted, node) {
732 		vdesc = to_qdma_vdesc(vd);
733 		ret = qdma_hw_enqueue(q, vdesc);
734 		if (ret)
735 			break;
736 	}
737 	q->submitted_vdesc = vdesc;
738 }
739 
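/*
 * Custom tx_submit hook: besides assigning a cookie and moving the
 * descriptor to the submitted list, descriptors are copied into the
 * hardware ring right away so that issue_pending only needs to update the
 * producer index.
 */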
740 static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
741 {
742 	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
743 	struct qdma_queue *q = to_qdma_queue(&vc->chan);
744 	struct virt_dma_desc *vd;
745 	unsigned long flags;
746 	dma_cookie_t cookie;
747 
748 	vd = container_of(tx, struct virt_dma_desc, tx);
749 	spin_lock_irqsave(&vc->lock, flags);
750 	cookie = dma_cookie_assign(tx);
751 
752 	list_move_tail(&vd->node, &vc->desc_submitted);
753 	qdma_fill_pending_vdesc(q);
754 	spin_unlock_irqrestore(&vc->lock, flags);
755 
756 	return cookie;
757 }
758 
759 static struct dma_async_tx_descriptor *
760 qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
761 		    unsigned int sg_len, enum dma_transfer_direction dir,
762 		    unsigned long flags, void *context)
763 {
764 	struct qdma_queue *q = to_qdma_queue(chan);
765 	struct dma_async_tx_descriptor *tx;
766 	struct qdma_mm_vdesc *vdesc;
767 
768 	vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
769 	if (!vdesc)
770 		return NULL;
771 	vdesc->sgl = sgl;
772 	vdesc->sg_len = sg_len;
773 	if (dir == DMA_MEM_TO_DEV)
774 		vdesc->dev_addr = q->cfg.dst_addr;
775 	else
776 		vdesc->dev_addr = q->cfg.src_addr;
777 
778 	tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
779 	tx->tx_submit = qdma_tx_submit;
780 
781 	return tx;
782 }
783 
784 static int qdma_device_config(struct dma_chan *chan,
785 			      struct dma_slave_config *cfg)
786 {
787 	struct qdma_queue *q = to_qdma_queue(chan);
788 
789 	memcpy(&q->cfg, cfg, sizeof(*cfg));
790 
791 	return 0;
792 }
793 
794 static int qdma_arm_err_intr(const struct qdma_device *qdev)
795 {
796 	u32 value = 0;
797 
798 	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
799 	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
800 	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);
801 
802 	return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
803 }
804 
805 static irqreturn_t qdma_error_isr(int irq, void *data)
806 {
807 	struct qdma_device *qdev = data;
808 	u32 err_stat = 0;
809 	int ret;
810 
811 	ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
812 	if (ret) {
813 		qdma_err(qdev, "read error state failed, ret %d", ret);
814 		goto out;
815 	}
816 
817 	qdma_err(qdev, "global error %d", err_stat);
818 	ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
819 	if (ret)
820 		qdma_err(qdev, "clear error state failed, ret %d", ret);
821 
822 out:
823 	qdma_arm_err_intr(qdev);
824 	return IRQ_HANDLED;
825 }
826 
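/*
 * Queue interrupt handler. Walk the interrupt aggregation ring while the
 * entry color matches the expected one; each valid entry identifies a queue
 * (direction plus queue id) and carries the hardware consumer index used to
 * retire completed virtual descriptors.
 */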
827 static irqreturn_t qdma_queue_isr(int irq, void *data)
828 {
829 	struct qdma_intr_ring *intr = data;
830 	struct qdma_queue *q = NULL;
831 	struct qdma_device *qdev;
832 	u32 index, comp_desc;
833 	u64 intr_ent;
834 	u8 color;
835 	int ret;
836 	u16 qid;
837 
838 	qdev = intr->qdev;
839 	index = intr->cidx;
840 	while (1) {
841 		struct virt_dma_desc *vd;
842 		struct qdma_mm_vdesc *vdesc;
843 		unsigned long flags;
844 		u32 cidx;
845 
846 		intr_ent = le64_to_cpu(intr->base[index]);
847 		color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
848 		if (color != intr->color)
849 			break;
850 
851 		qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
852 		if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
853 			q = qdev->c2h_queues;
854 		else
855 			q = qdev->h2c_queues;
856 		q += qid;
857 
858 		cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);
859 
860 		spin_lock_irqsave(&q->vchan.lock, flags);
861 		comp_desc = (cidx - q->cidx) & q->idx_mask;
862 
863 		vd = vchan_next_desc(&q->vchan);
864 		if (!vd)
865 			goto skip;
866 
867 		vdesc = to_qdma_vdesc(vd);
868 		while (comp_desc > vdesc->pending_descs) {
869 			list_del(&vd->node);
870 			vchan_cookie_complete(vd);
871 			comp_desc -= vdesc->pending_descs;
872 			vd = vchan_next_desc(&q->vchan);
873 			vdesc = to_qdma_vdesc(vd);
874 		}
875 		vdesc->pending_descs -= comp_desc;
876 		if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
877 			list_del(&vd->node);
878 			vchan_cookie_complete(vd);
879 		}
880 		q->cidx = cidx;
881 
882 		qdma_fill_pending_vdesc(q);
883 		qdma_xfer_start(q);
884 
885 skip:
886 		spin_unlock_irqrestore(&q->vchan.lock, flags);
887 
888 		/*
889 		 * Wrap the index value and flip the expected color value if
890 		 * interrupt aggregation PIDX has wrapped around.
891 		 */
892 		index++;
893 		index &= QDMA_INTR_RING_IDX_MASK;
894 		if (!index)
895 			intr->color = !intr->color;
896 	}
897 
898 	/*
899 	 * Update the software interrupt aggregation ring CIDX if a valid entry
900 	 * was found.
901 	 */
902 	if (q) {
903 		qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);
904 
905 		/*
906 		 * Record the last read index of status descriptor from the
907 		 * interrupt aggregation ring.
908 		 */
909 		intr->cidx = index;
910 
911 		ret = qdma_update_cidx(q, intr->ridx, index);
912 		if (ret) {
913 			qdma_err(qdev, "Failed to update IRQ CIDX");
914 			return IRQ_NONE;
915 		}
916 	}
917 
918 	return IRQ_HANDLED;
919 }
920 
921 static int qdma_init_error_irq(struct qdma_device *qdev)
922 {
923 	struct device *dev = &qdev->pdev->dev;
924 	int ret;
925 	u32 vec;
926 
927 	vec = qdev->queue_irq_start - 1;
928 
929 	ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
930 					IRQF_ONESHOT, "amd-qdma-error", qdev);
931 	if (ret) {
932 		qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
933 		return ret;
934 	}
935 
936 	ret = qdma_arm_err_intr(qdev);
937 	if (ret)
938 		qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);
939 
940 	return ret;
941 }
942 
943 static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
944 {
945 	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
946 	struct device *dev = &qdev->pdev->dev;
947 	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
948 	struct qdma_intr_ring *ring;
949 	struct qdma_ctxt_intr intr_ctxt;
950 	u32 vector;
951 	int ret, i;
952 
953 	qdev->qintr_ring_num = qdev->queue_irq_num;
954 	qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
955 					 sizeof(*qdev->qintr_rings),
956 					 GFP_KERNEL);
957 	if (!qdev->qintr_rings)
958 		return -ENOMEM;
959 
960 	vector = qdev->queue_irq_start;
961 	for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
962 		ring = &qdev->qintr_rings[i];
963 		ring->qdev = qdev;
964 		ring->msix_id = qdev->err_irq_idx + i + 1;
965 		ring->ridx = i;
966 		ring->color = 1;
967 		ring->base = dmam_alloc_coherent(pdata->dma_dev,
968 						 QDMA_INTR_RING_SIZE,
969 						 &ring->dev_base, GFP_KERNEL);
970 		if (!ring->base) {
971 			qdma_err(qdev, "Failed to alloc intr ring %d", i);
972 			return -ENOMEM;
973 		}
974 		intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
975 		intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
976 		intr_ctxt.vec = ring->msix_id;
977 		intr_ctxt.valid = true;
978 		intr_ctxt.color = true;
979 		ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
980 					QDMA_CTXT_CLEAR, ring->ridx, NULL);
981 		if (ret) {
982 			qdma_err(qdev, "Failed clear intr ctx, ret %d", ret);
983 			return ret;
984 		}
985 
986 		qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
987 		ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
988 					QDMA_CTXT_WRITE, ring->ridx, ctxt);
989 		if (ret) {
990 			qdma_err(qdev, "Failed setup intr ctx, ret %d", ret);
991 			return ret;
992 		}
993 
994 		ret = devm_request_threaded_irq(dev, vector, NULL,
995 						qdma_queue_isr, IRQF_ONESHOT,
996 						"amd-qdma-queue", ring);
997 		if (ret) {
998 			qdma_err(qdev, "Failed to request irq %d", vector);
999 			return ret;
1000 		}
1001 	}
1002 
1003 	return 0;
1004 }
1005 
1006 static int qdma_intr_init(struct qdma_device *qdev)
1007 {
1008 	int ret;
1009 
1010 	ret = qdma_init_error_irq(qdev);
1011 	if (ret) {
1012 		qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
1013 		return ret;
1014 	}
1015 
1016 	ret = qdmam_alloc_qintr_rings(qdev);
1017 	if (ret) {
1018 		qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
1019 		return ret;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 static void amd_qdma_remove(struct platform_device *pdev)
1026 {
1027 	struct qdma_device *qdev = platform_get_drvdata(pdev);
1028 
1029 	qdma_sgdma_control(qdev, 0);
1030 	dma_async_device_unregister(&qdev->dma_dev);
1031 
1032 	mutex_destroy(&qdev->ctxt_lock);
1033 }
1034 
1035 static int amd_qdma_probe(struct platform_device *pdev)
1036 {
1037 	struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
1038 	struct qdma_device *qdev;
1039 	struct resource *res;
1040 	void __iomem *regs;
1041 	int ret;
1042 
1043 	qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
1044 	if (!qdev)
1045 		return -ENOMEM;
1046 
1047 	platform_set_drvdata(pdev, qdev);
1048 	qdev->pdev = pdev;
1049 	mutex_init(&qdev->ctxt_lock);
1050 
1051 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1052 	if (!res) {
1053 		qdma_err(qdev, "Failed to get IRQ resource");
1054 		ret = -ENODEV;
1055 		goto failed;
1056 	}
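	/*
	 * The first vector of the IRQ resource services the global error
	 * interrupt; the remaining vectors are used for the queue interrupt
	 * aggregation rings.
	 */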
1057 	qdev->err_irq_idx = pdata->irq_index;
1058 	qdev->queue_irq_start = res->start + 1;
1059 	qdev->queue_irq_num = resource_size(res) - 1;
1060 
1061 	regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
1062 	if (IS_ERR(regs)) {
1063 		ret = PTR_ERR(regs);
1064 		qdma_err(qdev, "Failed to map IO resource, err %d", ret);
1065 		goto failed;
1066 	}
1067 
1068 	qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
1069 					     &qdma_regmap_config);
1070 	if (IS_ERR(qdev->regmap)) {
1071 		ret = PTR_ERR(qdev->regmap);
1072 		qdma_err(qdev, "Regmap init failed, err %d", ret);
1073 		goto failed;
1074 	}
1075 
1076 	ret = qdma_device_verify(qdev);
1077 	if (ret)
1078 		goto failed;
1079 
1080 	ret = qdma_get_hw_info(qdev);
1081 	if (ret)
1082 		goto failed;
1083 
1084 	INIT_LIST_HEAD(&qdev->dma_dev.channels);
1085 
1086 	ret = qdma_device_setup(qdev);
1087 	if (ret)
1088 		goto failed;
1089 
1090 	ret = qdma_intr_init(qdev);
1091 	if (ret) {
1092 		qdma_err(qdev, "Failed to initialize IRQs %d", ret);
1093 		goto failed_disable_engine;
1094 	}
1095 
1096 	dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
1097 	dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);
1098 
1099 	qdev->dma_dev.dev = &pdev->dev;
1100 	qdev->dma_dev.filter.map = pdata->device_map;
1101 	qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
1102 	qdev->dma_dev.filter.fn = qdma_filter_fn;
1103 	qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
1104 	qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
1105 	qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
1106 	qdev->dma_dev.device_config = qdma_device_config;
1107 	qdev->dma_dev.device_issue_pending = qdma_issue_pending;
1108 	qdev->dma_dev.device_tx_status = dma_cookie_status;
1109 	qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1110 
1111 	ret = dma_async_device_register(&qdev->dma_dev);
1112 	if (ret) {
1113 		qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
1114 		goto failed_disable_engine;
1115 	}
1116 
1117 	return 0;
1118 
1119 failed_disable_engine:
1120 	qdma_sgdma_control(qdev, 0);
1121 failed:
1122 	mutex_destroy(&qdev->ctxt_lock);
1123 	qdma_err(qdev, "Failed to probe AMD QDMA driver");
1124 	return ret;
1125 }
1126 
1127 static struct platform_driver amd_qdma_driver = {
1128 	.driver		= {
1129 		.name = "amd-qdma",
1130 	},
1131 	.probe		= amd_qdma_probe,
1132 	.remove		= amd_qdma_remove,
1133 };
1134 
1135 module_platform_driver(amd_qdma_driver);
1136 
1137 MODULE_DESCRIPTION("AMD QDMA driver");
1138 MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
1139 MODULE_LICENSE("GPL");
1140