/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/**
 *  @{
 * @file   al_hal_udma_main.c
 *
 * @brief  Universal DMA HAL driver for main functions (initialization, data path)
 *
 */

#include <al_hal_udma.h>
#include <al_hal_udma_config.h>

#define AL_UDMA_Q_RST_TOUT	10000	/* Queue reset timeout [uSecs] */

#define UDMA_STATE_IDLE		0x0
#define UDMA_STATE_NORMAL	0x1
#define UDMA_STATE_ABORT	0x2
#define UDMA_STATE_RESERVED	0x3

const char *const al_udma_states_name[] = {
	"Disable",
	"Idle",
	"Normal",
	"Abort",
	"Reset"
};

#define AL_UDMA_INITIAL_RING_ID	1

/*  dma_q flags */
#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID	AL_BIT(0)
#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE	AL_BIT(1)
#define AL_UDMA_Q_FLAGS_EN_COMP_COAL	AL_BIT(2)


static void al_udma_set_defaults(struct al_udma *udma)
{
	uint32_t tmp;
	uint8_t rev_id = udma->rev_id;

	if (udma->type == UDMA_TX) {
		struct unit_regs* tmp_unit_regs =
			(struct unit_regs*)udma->udma_regs;

		/* Setting the data fifo depth to 4K (256 strips of 16B)
		 * This allows the UDMA to have 16 outstanding writes */
		if (rev_id >= AL_UDMA_REV_ID_2) {
			al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
			      UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
			      256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
		}

		if (rev_id == AL_UDMA_REV_ID_0)
			/* disable AXI timeout for M0 */
			al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 0);
		else
			/* set AXI timeout to 1M (~2.6 ms) */
			al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);

		al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack
					, 0); /* Ack time out */


		if (rev_id == AL_UDMA_REV_ID_0) {
			tmp = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
			tmp &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
			tmp |= 4 << UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT;
			al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1
									, tmp);
		}

	}
	if (udma->type == UDMA_RX) {
		al_reg_write32(
			&udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
					/* Ack time out */

	}
}

/**
 * misc queue configurations
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config(struct al_udma_q *udma_q)
{
	uint32_t *reg_addr;
	uint32_t val;

	if (udma_q->udma->type == UDMA_TX) {
		reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;

		val = al_reg_read32(reg_addr);
		/* enable DMB */
		val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
		al_reg_write32(reg_addr, val);
	}
	return 0;
}

/**
 * set the queue's completion configuration register
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config_compl(struct al_udma_q *udma_q)
{
	uint32_t *reg_addr;
	uint32_t val;

	if (udma_q->udma->type == UDMA_TX)
		reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
	else
		reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;

	val = al_reg_read32(reg_addr);

	if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
		val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
	else
		val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;

	if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
		val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
	else
		val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;

	al_reg_write32(reg_addr, val);

	/* set the completion queue size */
	if (udma_q->udma->type == UDMA_RX) {
		val = al_reg_read32(
				&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
		val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
		/* the register expects it to be in words */
		val |= (udma_q->cdesc_size >> 2)
				& UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
		al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c
							, val);
	}
	return 0;
}

/**
 * reset the queue's pointers (Head, Tail, etc) and set the base addresses
 *
 * @param udma_q udma queue data structure
 */
static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
{
	/* reset the descriptors ring pointers */
	/* assert descriptor base address aligned. */
	al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
		   ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
	al_reg_write32(&udma_q->q_regs->rings.drbp_low,
		       AL_ADDR_LOW(udma_q->desc_phy_base));
	al_reg_write32(&udma_q->q_regs->rings.drbp_high,
		       AL_ADDR_HIGH(udma_q->desc_phy_base));

	al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);

	/* if completion ring update disabled */
	if (udma_q->cdesc_base_ptr == NULL) {
		udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
	} else {
		/* reset the completion descriptors ring pointers */
		/* assert completion base address aligned. */
		al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
			   ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
		al_reg_write32(&udma_q->q_regs->rings.crbp_low,
			       AL_ADDR_LOW(udma_q->cdesc_phy_base));
		al_reg_write32(&udma_q->q_regs->rings.crbp_high,
			       AL_ADDR_HIGH(udma_q->cdesc_phy_base));
	}
	al_udma_q_config_compl(udma_q);
	return 0;
}

/**
 * enable/disable udma queue
 *
 * @param udma_q udma queue data structure
 * @param enable non-zero value enables the queue, zero means disable
 *
 * @return 0
 */
static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);

	if (enable) {
		reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
		udma_q->status = AL_QUEUE_ENABLED;
	} else {
		reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
		udma_q->status = AL_QUEUE_DISABLED;
	}
	al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
	return 0;
}


/************************ API functions ***************************************/

/* Initialization functions */
/*
 * Initialize the udma engine
 */
int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
{
	int i;

	al_assert(udma);

	if (udma_params->num_of_queues > DMA_MAX_Q) {
		al_err("udma: invalid num_of_queues parameter\n");
		return -EINVAL;
	}

	udma->type = udma_params->type;
	udma->num_of_queues = udma_params->num_of_queues;
	udma->gen_regs = &udma_params->udma_regs_base->gen;

	if (udma->type == UDMA_TX)
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
	else
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;

	udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);

	if (udma_params->name == NULL)
		udma->name = "";
	else
		udma->name = udma_params->name;

	udma->state = UDMA_DISABLE;
	for (i = 0; i < DMA_MAX_Q; i++) {
		udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
	}
	/* initialize configuration registers to correct values */
	al_udma_set_defaults(udma);
	al_dbg("udma [%s] initialized. base %p\n", udma->name,
		udma->udma_regs);
	return 0;
}
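
/*
 * Illustrative usage sketch (not part of the HAL): bringing up a TX UDMA
 * engine. The register base pointer `regs_base` (pointer to the UDMA unit
 * registers), the queue count and the name are hypothetical values chosen
 * for the example.
 *
 *	struct al_udma tx_udma;
 *	struct al_udma_params params = {
 *		.udma_regs_base	= regs_base,
 *		.type		= UDMA_TX,
 *		.num_of_queues	= 4,
 *		.name		= "eth0-tx",
 *	};
 *
 *	if (al_udma_init(&tx_udma, &params) != 0)
 *		return -EINVAL;
 */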

/*
 * Initialize the udma queue data structure
 */
int al_udma_q_init(struct al_udma *udma, uint32_t qid,
					struct al_udma_q_params *q_params)
{
	struct al_udma_q *udma_q;

	al_assert(udma);
	al_assert(q_params);

	if (qid >= udma->num_of_queues) {
		al_err("udma: invalid queue id (%d)\n", qid);
		return -EINVAL;
	}

	if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
		al_err("udma: queue (%d) already enabled!\n", qid);
		return -EIO;
	}

	if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
		al_err("udma: queue (%d) size too small\n", qid);
		return -EINVAL;
	}

	if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
		al_err("udma: queue (%d) size too large\n", qid);
		return -EINVAL;
	}

	if (q_params->size & (q_params->size - 1)) {
		al_err("udma: queue (%d) size (%d) must be power of 2\n",
			 qid, q_params->size);
		return -EINVAL;
	}

	udma_q = &udma->udma_q[qid];
	/* set the queue's regs base address */
	if (udma->type == UDMA_TX)
		udma_q->q_regs = (union udma_q_regs __iomem *)
					&udma->udma_regs->m2s.m2s_q[qid];
	else
		udma_q->q_regs = (union udma_q_regs __iomem *)
					&udma->udma_regs->s2m.s2m_q[qid];

	udma_q->adapter_rev_id = q_params->adapter_rev_id;
	udma_q->size = q_params->size;
	udma_q->size_mask = q_params->size - 1;
	udma_q->desc_base_ptr = q_params->desc_base;
	udma_q->desc_phy_base = q_params->desc_phy_base;
	udma_q->cdesc_base_ptr = q_params->cdesc_base;
	udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
	udma_q->cdesc_size = q_params->cdesc_size;

	udma_q->next_desc_idx = 0;
	udma_q->next_cdesc_idx = 0;
	udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
	    (udma_q->size - 1) * udma_q->cdesc_size;
	udma_q->comp_head_idx = 0;
	udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
	udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
	udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
#if 0
	udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
						AL_M2S_DESC_RING_ID_SHIFT;
#endif
	udma_q->pkt_crnt_descs = 0;
	udma_q->flags = 0;
	udma_q->status = AL_QUEUE_DISABLED;
	udma_q->udma = udma;
	udma_q->qid = qid;

	/* start hardware configuration: */
	al_udma_q_config(udma_q);
	/* reset the queue pointers */
	al_udma_q_set_pointers(udma_q);

	/* enable the q */
	al_udma_q_enable(udma_q, 1);

	al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
			"  desc ring info: phys base 0x%llx virt base %p\n"
			"  cdesc ring info: phys base 0x%llx virt base %p "
				"entry size 0x%x",
			udma_q->udma->name, udma_q->qid,
			udma->type == UDMA_TX ? "Tx" : "Rx",
			q_params->size,
			(unsigned long long)q_params->desc_phy_base,
			q_params->desc_base,
			(unsigned long long)q_params->cdesc_phy_base,
			q_params->cdesc_base,
			q_params->cdesc_size);

	return 0;
}
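
/*
 * Illustrative usage sketch (not part of the HAL): setting up queue 0 of the
 * engine from the al_udma_init() example above. The ring buffers (`descs`,
 * `cdescs`), their DMA addresses and `adapter_rev` are hypothetical values
 * supplied by the caller, e.g. from DMA-coherent allocations.
 *
 *	struct al_udma_q_params qp = {
 *		.size		= 512,		// descriptors, must be a power of 2
 *		.desc_base	= descs,	// virtual base of the descriptor ring
 *		.desc_phy_base	= descs_dma,	// physical base of the descriptor ring
 *		.cdesc_base	= cdescs,	// virtual base of the completion ring
 *		.cdesc_phy_base	= cdescs_dma,	// physical base of the completion ring
 *		.cdesc_size	= 16,		// completion descriptor size [bytes]
 *		.adapter_rev_id	= adapter_rev,
 *	};
 *
 *	if (al_udma_q_init(&tx_udma, 0, &qp) != 0)
 *		return -EINVAL;
 */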

/*
 * Reset a udma queue
 */
int al_udma_q_reset(struct al_udma_q *udma_q)
{
	unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
	uint32_t *status_reg;
	uint32_t *dcp_reg;
	uint32_t *crhp_reg;
	uint32_t *q_sw_ctrl_reg;

	al_assert(udma_q);

	/* De-assert scheduling and prefetch */
	al_udma_q_enable(udma_q, 0);

	/* Wait for scheduling and prefetch to stop */
	status_reg = &udma_q->q_regs->rings.status;

	while (remaining_time) {
		uint32_t status = al_reg_read32(status_reg);

		if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
						UDMA_M2S_Q_STATUS_SCHEDULER)))
			break;

		remaining_time--;
		al_udelay(1);
	}

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for prefetch and "
			"scheduler disable\n", udma_q->udma->name, udma_q->qid,
			__func__);
		return -ETIMEDOUT;
	}

	/* Wait for the completion queue to reach the same pointer the
	 * prefetch stopped at ([TR]DCP == [TR]CRHP) */
	dcp_reg = &udma_q->q_regs->rings.dcp;
	crhp_reg = &udma_q->q_regs->rings.crhp;

	while (remaining_time) {
		uint32_t dcp = al_reg_read32(dcp_reg);
		uint32_t crhp = al_reg_read32(crhp_reg);

		if (dcp == crhp)
			break;

		remaining_time--;
		al_udelay(1);
	}

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
			udma_q->udma->name, udma_q->qid, __func__);
		return -ETIMEDOUT;
	}

	/* Assert the queue reset */
	if (udma_q->udma->type == UDMA_TX)
		q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
	else
		q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;

	al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);

	return 0;
}
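
/*
 * Illustrative usage sketch (not part of the HAL): resetting queue 0 of the
 * hypothetical `tx_udma` engine once the caller has stopped submitting new
 * descriptors to it. Error handling is reduced to bare returns for brevity.
 *
 *	struct al_udma_q *q;
 *
 *	if (al_udma_q_handle_get(&tx_udma, 0, &q) != 0)
 *		return -EINVAL;
 *	if (al_udma_q_reset(q) != 0)
 *		return -ETIMEDOUT;
 */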

/*
 * return (by reference) a pointer to a specific queue data structure.
 */
int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
						struct al_udma_q **q_handle)
{

	al_assert(udma);
	al_assert(q_handle);

	if (unlikely(qid >= udma->num_of_queues)) {
		al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
		return -EINVAL;
	}
	*q_handle = &udma->udma_q[qid];
	return 0;
}

/*
 * Change the UDMA's state
 */
int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
{
	uint32_t reg;

	al_assert(udma != NULL);
	if (state == udma->state)
		al_dbg("udma [%s]: requested state identical to "
			"current state (%d)\n", udma->name, state);

	al_dbg("udma [%s]: change state from (%s) to (%s)\n",
		 udma->name, al_udma_states_name[udma->state],
		 al_udma_states_name[state]);

	reg = 0;
	switch (state) {
	case UDMA_DISABLE:
		reg |= UDMA_M2S_CHANGE_STATE_DIS;
		break;
	case UDMA_NORMAL:
		reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
		break;
	case UDMA_ABORT:
		reg |= UDMA_M2S_CHANGE_STATE_ABORT;
		break;
	default:
		al_err("udma: invalid state (%d)\n", state);
		return -EINVAL;
	}

	if (udma->type == UDMA_TX)
		al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
	else
		al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);

	udma->state = state;
	return 0;
}
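
/*
 * Illustrative usage sketch (not part of the HAL): once the engine and its
 * queues have been initialized, move the UDMA from UDMA_DISABLE to
 * UDMA_NORMAL so it starts processing descriptors; al_udma_state_get() can
 * then be used to read back the aggregated hardware state. `tx_udma` is the
 * hypothetical engine from the al_udma_init() example above.
 *
 *	if (al_udma_state_set(&tx_udma, UDMA_NORMAL) != 0)
 *		return -EINVAL;
 */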

/*
 * return the current UDMA hardware state
 */
enum al_udma_state al_udma_state_get(struct al_udma *udma)
{
	uint32_t state_reg;
	uint32_t comp_ctrl;
	uint32_t stream_if;
	uint32_t data_rd;
	uint32_t desc_pref;

	if (udma->type == UDMA_TX)
		state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
	else
		state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);

	comp_ctrl = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_COMP_CTRL_MASK,
				     UDMA_M2S_STATE_COMP_CTRL_SHIFT);
	stream_if = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_STREAM_IF_MASK,
				     UDMA_M2S_STATE_STREAM_IF_SHIFT);
	data_rd = AL_REG_FIELD_GET(state_reg,
				   UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
				   UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
	desc_pref = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_DESC_PREF_MASK,
				     UDMA_M2S_STATE_DESC_PREF_SHIFT);

	al_assert(comp_ctrl != UDMA_STATE_RESERVED);
	al_assert(stream_if != UDMA_STATE_RESERVED);
	al_assert(data_rd != UDMA_STATE_RESERVED);
	al_assert(desc_pref != UDMA_STATE_RESERVED);

	/* if any of the states is abort then return abort */
	if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
			|| (data_rd == UDMA_STATE_ABORT)
			|| (desc_pref == UDMA_STATE_ABORT))
		return UDMA_ABORT;

	/* if any of the states is normal then return normal */
	if ((comp_ctrl == UDMA_STATE_NORMAL)
			|| (stream_if == UDMA_STATE_NORMAL)
			|| (data_rd == UDMA_STATE_NORMAL)
			|| (desc_pref == UDMA_STATE_NORMAL))
		return UDMA_NORMAL;

	return UDMA_IDLE;
}

/*
 * Action handling
 */

/*
 * get next completed packet from completion ring of the queue
 */
uint32_t al_udma_cdesc_packet_get(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**cdesc)
{
	uint32_t count;
	volatile union al_udma_cdesc *curr;
	uint32_t comp_flags;

	/* this function requires the completion ring update */
	al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));

	/* comp_head points to the last comp desc that was processed */
	curr = udma_q->comp_head_ptr;
	comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);

	/* check if the completion descriptor is new */
	if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
		return 0;
	/* if a new desc was found, increment the current packet's descriptor count */
	count = udma_q->pkt_crnt_descs + 1;
	while (!cdesc_is_last(comp_flags)) {
		curr = al_cdesc_next_update(udma_q, curr);
		comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
		if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
								== AL_FALSE)) {
			/* the current packet here doesn't have all */
			/* descriptors completed. log the current desc */
			/* location and number of completed descriptors so */
			/* far. then return */
			udma_q->pkt_crnt_descs = count;
			udma_q->comp_head_ptr = curr;
			return 0;
		}
		count++;
		/* check against max descs per packet. */
		al_assert(count <= udma_q->size);
	}
	/* return back the first descriptor of the packet */
	*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
	udma_q->pkt_crnt_descs = 0;
	udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);

	al_dbg("udma [%s %d]: packet completed. first desc %p (idx 0x%x)"
		 " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
		 udma_q->next_cdesc_idx, count);

	return count;
}
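
/*
 * Illustrative usage sketch (not part of the HAL): polling a queue for a
 * completed packet. al_udma_cdesc_ack() is assumed here to be the companion
 * helper (from al_hal_udma.h) that advances next_cdesc_idx once the caller
 * has consumed the returned completion descriptors.
 *
 *	volatile union al_udma_cdesc *cdesc;
 *	uint32_t ndescs;
 *
 *	ndescs = al_udma_cdesc_packet_get(q, &cdesc);
 *	if (ndescs) {
 *		// process ndescs completion descriptors starting at cdesc
 *		al_udma_cdesc_ack(q, ndescs);
 *	}
 */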

/** @} end of UDMA group */