/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/**
 *  @{
 * @file   al_hal_udma_main.c
 *
 * @brief  Universal DMA HAL driver for main functions (initialization, data path)
 *
 */

#include <al_hal_udma.h>
#include <al_hal_udma_config.h>

#define AL_UDMA_Q_RST_TOUT	10000	/* Queue reset timeout [uSecs] */

#define UDMA_STATE_IDLE		0x0
#define UDMA_STATE_NORMAL	0x1
#define UDMA_STATE_ABORT	0x2
#define UDMA_STATE_RESERVED	0x3

const char *const al_udma_states_name[] = {
	"Disable",
	"Idle",
	"Normal",
	"Abort",
	"Reset"
};

#define AL_UDMA_INITIAL_RING_ID	1

/*  dma_q flags */
#define AL_UDMA_Q_FLAGS_IGNORE_RING_ID	AL_BIT(0)
#define AL_UDMA_Q_FLAGS_NO_COMP_UPDATE	AL_BIT(1)
#define AL_UDMA_Q_FLAGS_EN_COMP_COAL	AL_BIT(2)

static void al_udma_set_defaults(struct al_udma *udma)
{
	uint8_t rev_id = udma->rev_id;

	if (udma->type == UDMA_TX) {
		struct unit_regs *tmp_unit_regs =
			(struct unit_regs *)udma->udma_regs;

		/* Setting the data fifo depth to 4K (256 strips of 16B)
		 * This allows the UDMA to have 16 outstanding writes */
		if (rev_id >= AL_UDMA_REV_ID_2) {
			al_reg_write32_masked(&tmp_unit_regs->m2s.m2s_rd.data_cfg,
			      UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK,
			      256 << UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_SHIFT);
		}

		/* set AXI timeout to 1M (~2.6 ms) */
		al_reg_write32(&tmp_unit_regs->gen.axi.cfg_1, 1000000);

		/* Ack time out */
		al_reg_write32(&tmp_unit_regs->m2s.m2s_comp.cfg_application_ack,
			       0);
	}
	if (udma->type == UDMA_RX) {
		/* Ack time out */
		al_reg_write32(
			&udma->udma_regs->s2m.s2m_comp.cfg_application_ack, 0);
	}
}

/**
 * misc queue configurations
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config(struct al_udma_q *udma_q)
{
	uint32_t *reg_addr;
	uint32_t val;

	if (udma_q->udma->type == UDMA_TX) {
		reg_addr = &udma_q->q_regs->m2s_q.rlimit.mask;

		val = al_reg_read32(reg_addr);
		/* enable DMB */
		val &= ~UDMA_M2S_Q_RATE_LIMIT_MASK_INTERNAL_PAUSE_DMB;
		al_reg_write32(reg_addr, val);
	}
	return 0;
}

/**
 * set the queue's completion configuration register
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_config_compl(struct al_udma_q *udma_q)
{
	uint32_t *reg_addr;
	uint32_t val;

	if (udma_q->udma->type == UDMA_TX)
		reg_addr = &udma_q->q_regs->m2s_q.comp_cfg;
	else
		reg_addr = &udma_q->q_regs->s2m_q.comp_cfg;

	val = al_reg_read32(reg_addr);

	if (udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE)
		val &= ~UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;
	else
		val |= UDMA_M2S_Q_COMP_CFG_EN_COMP_RING_UPDATE;

	if (udma_q->flags & AL_UDMA_Q_FLAGS_EN_COMP_COAL)
		val &= ~UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;
	else
		val |= UDMA_M2S_Q_COMP_CFG_DIS_COMP_COAL;

	al_reg_write32(reg_addr, val);

	/* set the completion queue size */
	if (udma_q->udma->type == UDMA_RX) {
		val = al_reg_read32(
				&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c);
		val &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
		/* the register expects the size in words */
		val |= (udma_q->cdesc_size >> 2)
				& UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
		al_reg_write32(&udma_q->udma->udma_regs->s2m.s2m_comp.cfg_1c,
			       val);
	}
	return 0;
}

/**
 * reset the queue's pointers (Head, Tail, etc.) and set the base addresses
 *
 * @param udma_q udma queue data structure
 *
 * @return 0
 */
static int al_udma_q_set_pointers(struct al_udma_q *udma_q)
{
	/* reset the descriptors ring pointers */
	/* assert descriptor base address aligned. */
	al_assert((AL_ADDR_LOW(udma_q->desc_phy_base) &
		   ~UDMA_M2S_Q_TDRBP_LOW_ADDR_MASK) == 0);
	al_reg_write32(&udma_q->q_regs->rings.drbp_low,
		       AL_ADDR_LOW(udma_q->desc_phy_base));
	al_reg_write32(&udma_q->q_regs->rings.drbp_high,
		       AL_ADDR_HIGH(udma_q->desc_phy_base));

	al_reg_write32(&udma_q->q_regs->rings.drl, udma_q->size);

	/* if completion ring update disabled */
	if (udma_q->cdesc_base_ptr == NULL) {
		udma_q->flags |= AL_UDMA_Q_FLAGS_NO_COMP_UPDATE;
	} else {
		/* reset the completion descriptors ring pointers */
		/* assert completion base address aligned. */
		al_assert((AL_ADDR_LOW(udma_q->cdesc_phy_base) &
			   ~UDMA_M2S_Q_TCRBP_LOW_ADDR_MASK) == 0);
		al_reg_write32(&udma_q->q_regs->rings.crbp_low,
			       AL_ADDR_LOW(udma_q->cdesc_phy_base));
		al_reg_write32(&udma_q->q_regs->rings.crbp_high,
			       AL_ADDR_HIGH(udma_q->cdesc_phy_base));
	}
	al_udma_q_config_compl(udma_q);
	return 0;
}

/**
 * enable/disable udma queue
 *
 * @param udma_q udma queue data structure
 * @param enable non-zero value enables the queue, zero disables it
 *
 * @return 0
 */
static int al_udma_q_enable(struct al_udma_q *udma_q, int enable)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->rings.cfg);

	if (enable) {
		reg |= (UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
		udma_q->status = AL_QUEUE_ENABLED;
	} else {
		reg &= ~(UDMA_M2S_Q_CFG_EN_PREF | UDMA_M2S_Q_CFG_EN_SCHEDULING);
		udma_q->status = AL_QUEUE_DISABLED;
	}
	al_reg_write32(&udma_q->q_regs->rings.cfg, reg);
	return 0;
}


/************************ API functions ***************************************/

/* Initialization functions */
/*
 * Initialize the udma engine
 */
int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params)
{
	int i;

	al_assert(udma);

	if (udma_params->num_of_queues > DMA_MAX_Q) {
		al_err("udma: invalid num_of_queues parameter\n");
		return -EINVAL;
	}

	udma->type = udma_params->type;
	udma->num_of_queues = udma_params->num_of_queues;
	udma->gen_regs = &udma_params->udma_regs_base->gen;

	if (udma->type == UDMA_TX)
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->m2s;
	else
		udma->udma_regs = (union udma_regs *)&udma_params->udma_regs_base->s2m;

	udma->rev_id = al_udma_get_revision(udma_params->udma_regs_base);

	if (udma_params->name == NULL)
		udma->name = "";
	else
		udma->name = udma_params->name;

	udma->state = UDMA_DISABLE;
	for (i = 0; i < DMA_MAX_Q; i++) {
		udma->udma_q[i].status = AL_QUEUE_NOT_INITIALIZED;
	}
	/* initialize configuration registers to correct values */
	al_udma_set_defaults(udma);
	al_dbg("udma [%s] initialized. base %p\n", udma->name,
		udma->udma_regs);
	return 0;
}

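/*
 * Usage sketch (illustrative only, not part of this file's code): minimal
 * bring-up of an M2S (TX) engine.  'regs_base' is a hypothetical pointer to
 * the engine's mapped register file, obtained by the caller from its bus
 * layer; the field names are the ones al_udma_init() reads above.
 *
 *	struct al_udma tx_udma;
 *	struct al_udma_params params = {
 *		.udma_regs_base	= regs_base,
 *		.type		= UDMA_TX,
 *		.num_of_queues	= 1,
 *		.name		= "demo-tx",
 *	};
 *	int rc;
 *
 *	rc = al_udma_init(&tx_udma, &params);
 *	if (rc)
 *		return rc;
 *
 * al_udma_init() leaves the engine in UDMA_DISABLE state; queues are added
 * with al_udma_q_init() and the engine is then moved to UDMA_NORMAL with
 * al_udma_state_set().
 */
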
/*
 * Initialize the udma queue data structure
 */
int al_udma_q_init(struct al_udma *udma, uint32_t qid,
					struct al_udma_q_params *q_params)
{
	struct al_udma_q *udma_q;

	al_assert(udma);
	al_assert(q_params);

	if (qid >= udma->num_of_queues) {
		al_err("udma: invalid queue id (%d)\n", qid);
		return -EINVAL;
	}

	if (udma->udma_q[qid].status == AL_QUEUE_ENABLED) {
		al_err("udma: queue (%d) already enabled!\n", qid);
		return -EIO;
	}

	if (q_params->size < AL_UDMA_MIN_Q_SIZE) {
		al_err("udma: queue (%d) size too small\n", qid);
		return -EINVAL;
	}

	if (q_params->size > AL_UDMA_MAX_Q_SIZE) {
		al_err("udma: queue (%d) size too large\n", qid);
		return -EINVAL;
	}

	if (q_params->size & (q_params->size - 1)) {
		al_err("udma: queue (%d) size (%d) must be power of 2\n",
			 qid, q_params->size);
		return -EINVAL;
	}

	udma_q = &udma->udma_q[qid];
	/* set the queue's regs base address */
	if (udma->type == UDMA_TX)
		udma_q->q_regs = (union udma_q_regs __iomem *)
					&udma->udma_regs->m2s.m2s_q[qid];
	else
		udma_q->q_regs = (union udma_q_regs __iomem *)
					&udma->udma_regs->s2m.s2m_q[qid];

	udma_q->adapter_rev_id = q_params->adapter_rev_id;
	udma_q->size = q_params->size;
	udma_q->size_mask = q_params->size - 1;
	udma_q->desc_base_ptr = q_params->desc_base;
	udma_q->desc_phy_base = q_params->desc_phy_base;
	udma_q->cdesc_base_ptr = q_params->cdesc_base;
	udma_q->cdesc_phy_base = q_params->cdesc_phy_base;
	udma_q->cdesc_size = q_params->cdesc_size;

	udma_q->next_desc_idx = 0;
	udma_q->next_cdesc_idx = 0;
	udma_q->end_cdesc_ptr = (uint8_t *) udma_q->cdesc_base_ptr +
	    (udma_q->size - 1) * udma_q->cdesc_size;
	udma_q->comp_head_idx = 0;
	udma_q->comp_head_ptr = (union al_udma_cdesc *)udma_q->cdesc_base_ptr;
	udma_q->desc_ring_id = AL_UDMA_INITIAL_RING_ID;
	udma_q->comp_ring_id = AL_UDMA_INITIAL_RING_ID;
#if 0
	udma_q->desc_ctrl_bits = AL_UDMA_INITIAL_RING_ID <<
						AL_M2S_DESC_RING_ID_SHIFT;
#endif
	udma_q->pkt_crnt_descs = 0;
	udma_q->flags = 0;
	udma_q->status = AL_QUEUE_DISABLED;
	udma_q->udma = udma;
	udma_q->qid = qid;

	/* start hardware configuration: */
	al_udma_q_config(udma_q);
	/* reset the queue pointers */
	al_udma_q_set_pointers(udma_q);

	/* enable the q */
	al_udma_q_enable(udma_q, 1);

	al_dbg("udma [%s %d]: %s q init. size 0x%x\n"
			"  desc ring info: phys base 0x%llx virt base %p\n",
			udma_q->udma->name, udma_q->qid,
			udma->type == UDMA_TX ? "Tx" : "Rx",
			q_params->size,
			(unsigned long long)q_params->desc_phy_base,
			q_params->desc_base);
	al_dbg("  cdesc ring info: phys base 0x%llx virt base %p entry size 0x%x\n",
			(unsigned long long)q_params->cdesc_phy_base,
			q_params->cdesc_base,
			q_params->cdesc_size);

	return 0;
}

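/*
 * Usage sketch (illustrative only): adding queue 0 to the engine initialized
 * above.  'tx_descs'/'tx_cdescs' and their physical addresses stand for
 * hypothetical DMA-coherent ring allocations owned by the caller; 'size'
 * must be a power of 2 within [AL_UDMA_MIN_Q_SIZE, AL_UDMA_MAX_Q_SIZE], and
 * 'cdesc_size' is the completion descriptor size in bytes (8 is only an
 * assumed value here).
 *
 *	struct al_udma_q_params qp = {
 *		.size		= 256,
 *		.desc_base	= tx_descs,
 *		.desc_phy_base	= tx_descs_phys,
 *		.cdesc_base	= tx_cdescs,
 *		.cdesc_phy_base	= tx_cdescs_phys,
 *		.cdesc_size	= 8,
 *	};
 *	int rc;
 *
 *	rc = al_udma_q_init(&tx_udma, 0, &qp);
 *
 * A NULL 'cdesc_base' is also accepted: al_udma_q_set_pointers() then sets
 * AL_UDMA_Q_FLAGS_NO_COMP_UPDATE and completion ring updates stay disabled.
 */
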
/*
 * Reset a udma queue
 */
int al_udma_q_reset(struct al_udma_q *udma_q)
{
	unsigned int remaining_time = AL_UDMA_Q_RST_TOUT;
	uint32_t *status_reg;
	uint32_t *dcp_reg;
	uint32_t *crhp_reg;
	uint32_t *q_sw_ctrl_reg;

	al_assert(udma_q);

	/* De-assert scheduling and prefetch */
	al_udma_q_enable(udma_q, 0);

	/* Wait for scheduling and prefetch to stop */
	status_reg = &udma_q->q_regs->rings.status;

	while (remaining_time) {
		uint32_t status = al_reg_read32(status_reg);

		if (!(status & (UDMA_M2S_Q_STATUS_PREFETCH |
						UDMA_M2S_Q_STATUS_SCHEDULER)))
			break;

		remaining_time--;
		al_udelay(1);
	}

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for prefetch and "
			"scheduler disable\n", udma_q->udma->name, udma_q->qid,
			__func__);
		return -ETIMEDOUT;
	}

	/* Wait for the completion queue head to reach the pointer at which
	 * the prefetch stopped ([TR]DCP == [TR]CRHP) */
	dcp_reg = &udma_q->q_regs->rings.dcp;
	crhp_reg = &udma_q->q_regs->rings.crhp;

	while (remaining_time) {
		uint32_t dcp = al_reg_read32(dcp_reg);
		uint32_t crhp = al_reg_read32(crhp_reg);

		if (dcp == crhp)
			break;

		remaining_time--;
		al_udelay(1);
	}

	if (!remaining_time) {
		al_err("udma [%s %d]: %s timeout waiting for dcp==crhp\n",
			udma_q->udma->name, udma_q->qid, __func__);
		return -ETIMEDOUT;
	}

	/* Assert the queue reset */
	if (udma_q->udma->type == UDMA_TX)
		q_sw_ctrl_reg = &udma_q->q_regs->m2s_q.q_sw_ctrl;
	else
		q_sw_ctrl_reg = &udma_q->q_regs->s2m_q.q_sw_ctrl;

	al_reg_write32(q_sw_ctrl_reg, UDMA_M2S_Q_SW_CTRL_RST_Q);

	return 0;
}

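/*
 * Usage sketch (illustrative only): resetting a queue.  The caller is
 * expected to have quiesced the queue first (no new descriptors being
 * submitted, outstanding completions consumed); otherwise the status and
 * DCP/CRHP polls above are likely to time out.
 *
 *	if (al_udma_q_reset(udma_q) == -ETIMEDOUT)
 *		al_err("udma queue did not drain\n");
 */
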
/*
 * Return (by reference) a pointer to a specific queue data structure.
 */
int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
						struct al_udma_q **q_handle)
{
	al_assert(udma);
	al_assert(q_handle);

	if (unlikely(qid >= udma->num_of_queues)) {
		al_err("udma [%s]: invalid queue id (%d)\n", udma->name, qid);
		return -EINVAL;
	}
	*q_handle = &udma->udma_q[qid];
	return 0;
}

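/*
 * Usage sketch (illustrative only): fetching the handle of queue 0 and
 * passing it to a per-queue operation.
 *
 *	struct al_udma_q *q;
 *
 *	if (al_udma_q_handle_get(&tx_udma, 0, &q) == 0)
 *		al_udma_q_reset(q);
 */
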
/*
 * Change the UDMA's state
 */
int al_udma_state_set(struct al_udma *udma, enum al_udma_state state)
{
	uint32_t reg;

	al_assert(udma != NULL);
	if (state == udma->state)
		al_dbg("udma [%s]: requested state identical to "
			"current state (%d)\n", udma->name, state);

	al_dbg("udma [%s]: change state from (%s) to (%s)\n",
		 udma->name, al_udma_states_name[udma->state],
		 al_udma_states_name[state]);

	reg = 0;
	switch (state) {
	case UDMA_DISABLE:
		reg |= UDMA_M2S_CHANGE_STATE_DIS;
		break;
	case UDMA_NORMAL:
		reg |= UDMA_M2S_CHANGE_STATE_NORMAL;
		break;
	case UDMA_ABORT:
		reg |= UDMA_M2S_CHANGE_STATE_ABORT;
		break;
	default:
		al_err("udma: invalid state (%d)\n", state);
		return -EINVAL;
	}

	if (udma->type == UDMA_TX)
		al_reg_write32(&udma->udma_regs->m2s.m2s.change_state, reg);
	else
		al_reg_write32(&udma->udma_regs->s2m.s2m.change_state, reg);

	udma->state = state;
	return 0;
}

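/*
 * Usage sketch (illustrative only): starting the engine once its queues are
 * initialized.  Only UDMA_DISABLE, UDMA_NORMAL and UDMA_ABORT are accepted;
 * any other value returns -EINVAL.
 *
 *	int rc = al_udma_state_set(&tx_udma, UDMA_NORMAL);
 */
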
/*
 * return the current UDMA hardware state
 */
enum al_udma_state al_udma_state_get(struct al_udma *udma)
{
	uint32_t state_reg;
	uint32_t comp_ctrl;
	uint32_t stream_if;
	uint32_t data_rd;
	uint32_t desc_pref;

	if (udma->type == UDMA_TX)
		state_reg = al_reg_read32(&udma->udma_regs->m2s.m2s.state);
	else
		state_reg = al_reg_read32(&udma->udma_regs->s2m.s2m.state);

	comp_ctrl = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_COMP_CTRL_MASK,
				     UDMA_M2S_STATE_COMP_CTRL_SHIFT);
	stream_if = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_STREAM_IF_MASK,
				     UDMA_M2S_STATE_STREAM_IF_SHIFT);
	data_rd = AL_REG_FIELD_GET(state_reg,
				   UDMA_M2S_STATE_DATA_RD_CTRL_MASK,
				   UDMA_M2S_STATE_DATA_RD_CTRL_SHIFT);
	desc_pref = AL_REG_FIELD_GET(state_reg,
				     UDMA_M2S_STATE_DESC_PREF_MASK,
				     UDMA_M2S_STATE_DESC_PREF_SHIFT);

	al_assert(comp_ctrl != UDMA_STATE_RESERVED);
	al_assert(stream_if != UDMA_STATE_RESERVED);
	al_assert(data_rd != UDMA_STATE_RESERVED);
	al_assert(desc_pref != UDMA_STATE_RESERVED);

	/* if any of the states is abort then return abort */
	if ((comp_ctrl == UDMA_STATE_ABORT) || (stream_if == UDMA_STATE_ABORT)
			|| (data_rd == UDMA_STATE_ABORT)
			|| (desc_pref == UDMA_STATE_ABORT))
		return UDMA_ABORT;

	/* if any of the states is normal then return normal */
	if ((comp_ctrl == UDMA_STATE_NORMAL)
			|| (stream_if == UDMA_STATE_NORMAL)
			|| (data_rd == UDMA_STATE_NORMAL)
			|| (desc_pref == UDMA_STATE_NORMAL))
		return UDMA_NORMAL;

	return UDMA_IDLE;
}

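/*
 * Usage sketch (illustrative only): a bounded graceful stop.  Request an
 * abort, then poll until every sub-unit has drained back to idle.  The
 * retry limit is an assumed safety net, not something the HAL mandates.
 *
 *	int retries = 1000;
 *
 *	al_udma_state_set(&tx_udma, UDMA_ABORT);
 *	while ((al_udma_state_get(&tx_udma) != UDMA_IDLE) && retries--)
 *		al_udelay(1);
 */
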
/*
 * Action handling
 */

/*
 * get next completed packet from completion ring of the queue
 */
uint32_t al_udma_cdesc_packet_get(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**cdesc)
{
	uint32_t count;
	volatile union al_udma_cdesc *curr;
	uint32_t comp_flags;

	/* this function requires the completion ring update */
	al_assert(!(udma_q->flags & AL_UDMA_Q_FLAGS_NO_COMP_UPDATE));

	/* comp_head points to the next comp desc to be processed */
	curr = udma_q->comp_head_ptr;
	comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);

	/* check if the completion descriptor is new */
	if (unlikely(al_udma_new_cdesc(udma_q, comp_flags) == AL_FALSE))
		return 0;
	/* if new desc found, increment the current packet's descriptor count */
	count = udma_q->pkt_crnt_descs + 1;
	while (!cdesc_is_last(comp_flags)) {
		curr = al_cdesc_next_update(udma_q, curr);
		comp_flags = swap32_from_le(curr->al_desc_comp_tx.ctrl_meta);
		if (unlikely(al_udma_new_cdesc(udma_q, comp_flags)
								== AL_FALSE)) {
			/*
			 * The current packet doesn't have all of its
			 * descriptors completed; log the current desc
			 * location and the number of completed descriptors
			 * so far, then return.
			 */
			udma_q->pkt_crnt_descs = count;
			udma_q->comp_head_ptr = curr;
			return 0;
		}
		count++;
		/* check against max descs per packet. */
		al_assert(count <= udma_q->size);
	}
	/* return the first descriptor of the packet */
	*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);
	udma_q->pkt_crnt_descs = 0;
	udma_q->comp_head_ptr = al_cdesc_next_update(udma_q, curr);

	al_dbg("udma [%s %d]: packet completed. first desc %p (idx 0x%x)"
		 " descs %d\n", udma_q->udma->name, udma_q->qid, *cdesc,
		 udma_q->next_cdesc_idx, count);

	return count;
}

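/*
 * Usage sketch (illustrative only): polling the completion ring of a queue.
 * al_udma_cdesc_packet_get() returns 0 until a whole packet has completed;
 * it then returns the packet's descriptor count and points 'cdesc' at its
 * first completion descriptor.  'process_packet' is a hypothetical
 * caller-supplied consumer.
 *
 *	volatile union al_udma_cdesc *cdesc;
 *	uint32_t ndescs;
 *
 *	ndescs = al_udma_cdesc_packet_get(q, &cdesc);
 *	if (ndescs)
 *		process_packet(cdesc, ndescs);
 *
 * After consuming the packet the caller must acknowledge the 'ndescs'
 * descriptors so the ring entries can be reused (the HAL header is assumed
 * to provide the acknowledge helper for this).
 */
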
/** @} end of UDMA group */