/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/**
 * @defgroup group_udma_api API
 * @ingroup group_udma
 * UDMA API
 * @{
 * @}
 *
 * @defgroup group_udma_main UDMA Main
 * @ingroup group_udma_api
 * UDMA main API
 * @{
 * @file   al_hal_udma.h
 *
 * @brief C Header file for the Universal DMA HAL driver
 *
 */

#ifndef __AL_HAL_UDMA_H__
#define __AL_HAL_UDMA_H__

#include "al_hal_common.h"
#include "al_hal_udma_regs.h"

/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */

#define DMA_MAX_Q		4
#define AL_UDMA_MIN_Q_SIZE	4
#define AL_UDMA_MAX_Q_SIZE	(1 << 16) /* HW can do more, but we limit it */

/* Default max number of descriptors supported per action */
#define AL_UDMA_DEFAULT_MAX_ACTN_DESCS	16

#define AL_UDMA_REV_ID_0	0
#define AL_UDMA_REV_ID_1	1
#define AL_UDMA_REV_ID_2	2

#define DMA_RING_ID_MASK	0x3
/* New registers ?? */
/* Statistics - TBD */

/** UDMA submission descriptor */
union al_udma_desc {
	/* TX */
	struct {
		uint32_t len_ctrl;
		uint32_t meta_ctrl;
		uint64_t buf_ptr;
	} tx;
	/* TX meta, used by upper layer */
	struct {
		uint32_t len_ctrl;
		uint32_t meta_ctrl;
		uint32_t meta1;
		uint32_t meta2;
	} tx_meta;
	/* RX */
	struct {
		uint32_t len_ctrl;
		uint32_t buf2_ptr_lo;
		uint64_t buf1_ptr;
	} rx;
} __packed_a16;

/* TX descriptor length and control fields */

#define AL_M2S_DESC_CONCAT			AL_BIT(31)	/* concatenate */
#define AL_M2S_DESC_DMB				AL_BIT(30)	/* data memory barrier */
#define AL_M2S_DESC_NO_SNOOP_H			AL_BIT(29)
#define AL_M2S_DESC_INT_EN			AL_BIT(28)	/* enable interrupt */
#define AL_M2S_DESC_LAST			AL_BIT(27)
#define AL_M2S_DESC_FIRST			AL_BIT(26)
#define AL_M2S_DESC_RING_ID_SHIFT		24
#define AL_M2S_DESC_RING_ID_MASK		(0x3 << AL_M2S_DESC_RING_ID_SHIFT)
#define AL_M2S_DESC_META_DATA			AL_BIT(23)
#define AL_M2S_DESC_DUMMY			AL_BIT(22) /* for metadata only */
#define AL_M2S_DESC_LEN_ADJ_SHIFT		20
#define AL_M2S_DESC_LEN_ADJ_MASK		(0x7 << AL_M2S_DESC_LEN_ADJ_SHIFT)
#define AL_M2S_DESC_LEN_SHIFT			0
#define AL_M2S_DESC_LEN_MASK			(0xfffff << AL_M2S_DESC_LEN_SHIFT)

#define AL_S2M_DESC_DUAL_BUF			AL_BIT(31)
#define AL_S2M_DESC_NO_SNOOP_H			AL_BIT(29)
#define AL_S2M_DESC_INT_EN			AL_BIT(28)	/* enable interrupt */
#define AL_S2M_DESC_RING_ID_SHIFT		24
#define AL_S2M_DESC_RING_ID_MASK		(0x3 << AL_S2M_DESC_RING_ID_SHIFT)
#define AL_S2M_DESC_LEN_SHIFT			0
#define AL_S2M_DESC_LEN_MASK			(0xffff << AL_S2M_DESC_LEN_SHIFT)
#define AL_S2M_DESC_LEN2_SHIFT			16
#define AL_S2M_DESC_LEN2_MASK			(0x3fff << AL_S2M_DESC_LEN2_SHIFT)
#define AL_S2M_DESC_LEN2_GRANULARITY_SHIFT	6

/* TX/RX descriptor VMID field (in the 64-bit buffer address field) */
#define AL_UDMA_DESC_VMID_SHIFT		48
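
/*
 * Example (illustrative sketch, not part of the original API): building the
 * len_ctrl word of a single-buffer TX descriptor from the fields above.
 * 'buf_phys_addr' and the 1500-byte length are hypothetical placeholders
 * supplied by the caller's DMA mapping code.
 *
 * @code
 *	union al_udma_desc *desc = al_udma_desc_get(udma_q);
 *	uint32_t flags = al_udma_ring_id_get(udma_q) <<
 *	    AL_M2S_DESC_RING_ID_SHIFT;
 *
 *	// single-buffer packet: first + last, raise an interrupt on completion
 *	flags |= AL_M2S_DESC_FIRST | AL_M2S_DESC_LAST | AL_M2S_DESC_INT_EN;
 *	desc->tx.len_ctrl = flags |
 *	    ((1500 << AL_M2S_DESC_LEN_SHIFT) & AL_M2S_DESC_LEN_MASK);
 *	desc->tx.buf_ptr = (uint64_t)buf_phys_addr;
 * @endcode
 */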

/** UDMA completion descriptor */
union al_udma_cdesc {
	/* TX completion */
	struct {
		uint32_t ctrl_meta;
	} al_desc_comp_tx;
	/* RX completion */
	struct {
		/* TBD */
		uint32_t ctrl_meta;
	} al_desc_comp_rx;
} __packed_a4;

/* TX/RX common completion descriptor ctrl_meta fields */
#define AL_UDMA_CDESC_ERROR		AL_BIT(31)
#define AL_UDMA_CDESC_BUF1_USED		AL_BIT(30)
#define AL_UDMA_CDESC_DDP		AL_BIT(29)
#define AL_UDMA_CDESC_LAST		AL_BIT(27)
#define AL_UDMA_CDESC_FIRST		AL_BIT(26)
/* word 2 */
#define AL_UDMA_CDESC_BUF2_USED		AL_BIT(31)
#define AL_UDMA_CDESC_BUF2_LEN_SHIFT	16
#define AL_UDMA_CDESC_BUF2_LEN_MASK	AL_FIELD_MASK(29, 16)

/** Basic buffer structure */
struct al_buf {
	al_phys_addr_t addr; /**< Buffer physical address */
	uint32_t len; /**< Buffer length in bytes */
};

/** Block is a set of buffers that belong to the same source or destination */
struct al_block {
	struct al_buf *bufs; /**< The buffers of the block */
	uint32_t num; /**< Number of buffers in the block */

	/**
	 * VMID to be assigned to the block descriptors.
	 * Requires VMID-in-descriptor to be enabled for the specific UDMA
	 * queue.
	 */
	uint16_t vmid;
};
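
/*
 * Example (sketch only): describing a two-buffer block. The buffer addresses
 * and lengths are hypothetical placeholders supplied by the caller's DMA
 * mapping code.
 *
 * @code
 *	struct al_buf bufs[2] = {
 *		{ .addr = hdr_phys_addr, .len = 64 },	// header buffer
 *		{ .addr = pay_phys_addr, .len = 1436 },	// payload buffer
 *	};
 *	struct al_block blk = {
 *		.bufs = bufs,
 *		.num = 2,
 *		.vmid = 0,	// meaningful only if VMID-in-descriptor is enabled
 *	};
 * @endcode
 */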

/** UDMA type */
enum al_udma_type {
	UDMA_TX,
	UDMA_RX
};

/** UDMA state */
enum al_udma_state {
	UDMA_DISABLE = 0,
	UDMA_IDLE,
	UDMA_NORMAL,
	UDMA_ABORT,
	UDMA_RESET
};

extern const char *const al_udma_states_name[];

/** UDMA queue specific parameters from the upper layer */
struct al_udma_q_params {
	uint32_t size;		/**< ring size (in descriptors); submission and
				 * completion rings must have the same size
				 */
	union al_udma_desc *desc_base; /**< CPU address of the submission ring
					 * descriptors
					 */
	al_phys_addr_t desc_phy_base;	/**< submission ring descriptors
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t desc_phy_base_tag;
	bus_dmamap_t desc_phy_base_map;
#endif
	uint8_t *cdesc_base;	/**< completion descriptors pointer; NULL
				 * means no completion update
				 */
	al_phys_addr_t cdesc_phy_base;	/**< completion descriptors ring
					 * physical base address
					 */
#ifdef __FreeBSD__
	bus_dma_tag_t cdesc_phy_base_tag;
	bus_dmamap_t cdesc_phy_base_map;
#endif
	uint32_t cdesc_size;	/**< size (in bytes) of a single DMA completion
				 * descriptor
				 */

	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
};

/** UDMA parameters from the upper layer */
struct al_udma_params {
	struct unit_regs __iomem *udma_regs_base;
	enum al_udma_type type;	/**< Tx or Rx */
	uint8_t num_of_queues; /**< number of queues supported by the UDMA */
	const char *name; /**< the upper layer must keep the string area */
};

/* Forward declaration */
struct al_udma;

/** SW status of a queue */
enum al_udma_queue_status {
	AL_QUEUE_NOT_INITIALIZED = 0,
	AL_QUEUE_DISABLED,
	AL_QUEUE_ENABLED,
	AL_QUEUE_ABORTED
};

/** UDMA queue private data structure */
struct __cache_aligned al_udma_q {
	uint16_t size_mask;		/**< mask used for pointer wrap-around;
					 * equals size - 1
					 */
	union udma_q_regs __iomem *q_regs; /**< pointer to the per-queue UDMA
					   * registers
					   */
	union al_udma_desc *desc_base_ptr; /**< base address of the submission
						* ring descriptors
						*/
	uint16_t next_desc_idx; /**< index of the next available submission
				      * descriptor
				      */

	uint32_t desc_ring_id;	/**< current submission ring id */

	uint8_t *cdesc_base_ptr;/**< completion descriptors pointer; NULL
				 * means no completion
				 */
	uint32_t cdesc_size;	/**< size (in bytes) of a UDMA completion ring
				 * descriptor
				 */
	uint16_t next_cdesc_idx; /**< index of the next completing ring
			      * descriptor
			      */
	uint8_t *end_cdesc_ptr;	/**< used for wrap-around detection */
	uint16_t comp_head_idx; /**< shadow of the completion ring head
				 * pointer register
				 */
	volatile union al_udma_cdesc *comp_head_ptr; /**< when working in
				       * get_packet mode we maintain a pointer
				       * instead of the above idx
				       */

	uint32_t pkt_crnt_descs; /**< number of processed descriptors of the
				  * current packet
				  */
	uint32_t comp_ring_id;	/**< current completion ring id */

	al_phys_addr_t desc_phy_base; /**< submission desc. physical base */
	al_phys_addr_t cdesc_phy_base; /**< completion desc. physical base */

	uint32_t flags; /**< flags used for completion modes */
	uint32_t size;		/**< ring size in descriptors */
	enum al_udma_queue_status status;
	struct al_udma *udma;	/**< pointer to the parent UDMA */
	uint32_t qid;		/**< the index number of the queue */

	/*
	 * The following fields are duplicated from the UDMA parent adapter
	 * due to performance considerations.
	 */
	uint8_t adapter_rev_id; /**< PCI adapter revision ID */
};

/** UDMA handle */
struct al_udma {
	const char *name;
	enum al_udma_type type;	/* Tx or Rx */
	enum al_udma_state state;
	uint8_t num_of_queues; /* number of queues supported by the UDMA */
	union udma_regs __iomem *udma_regs; /* pointer to the UDMA registers */
	struct udma_gen_regs *gen_regs;		/* pointer to the Gen registers*/
	struct al_udma_q udma_q[DMA_MAX_Q];	/* array of UDMA queues */
	unsigned int rev_id; /* UDMA revision ID */
};


/*
 * Configurations
 */

/* Initialization functions */
/**
 * Initialize the UDMA engine
 *
 * @param udma udma data structure
 * @param udma_params udma parameters from the upper layer
 *
 * @return 0 on success, -EINVAL otherwise.
 */
int al_udma_init(struct al_udma *udma, struct al_udma_params *udma_params);
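
/*
 * Example (illustrative sketch): bringing up a TX UDMA engine. 'bar_base' is
 * a hypothetical pointer to the caller's mapped register BAR.
 *
 * @code
 *	struct al_udma tx_udma;
 *	struct al_udma_params params = {
 *		.udma_regs_base = (struct unit_regs __iomem *)bar_base,
 *		.type = UDMA_TX,
 *		.num_of_queues = DMA_MAX_Q,
 *		.name = "eth0-tx",	// must stay valid for the UDMA lifetime
 *	};
 *
 *	if (al_udma_init(&tx_udma, &params))
 *		return -EINVAL;
 * @endcode
 */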

/**
 * Initialize the udma queue data structure
 *
 * @param udma udma data structure
 * @param qid queue index
 * @param q_params queue parameters from the upper layer
 *
 * @return 0 if no error found.
 *	   -EINVAL if the qid is out of range.
 *	   -EIO if the queue was already initialized.
 */
int al_udma_q_init(struct al_udma *udma, uint32_t qid,
		   struct al_udma_q_params *q_params);
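
/*
 * Example (sketch, assuming the rings were already allocated by the caller as
 * DMA-coherent memory; the *_virt/*_phys variables are hypothetical): queue 0
 * with matching 512-entry submission and completion rings. On FreeBSD the
 * bus_dma tag/map fields would also be filled in.
 *
 * @code
 *	struct al_udma_q_params qp = {
 *		.size = 512,
 *		.desc_base = desc_ring_virt,
 *		.desc_phy_base = desc_ring_phys,
 *		.cdesc_base = (uint8_t *)cdesc_ring_virt,
 *		.cdesc_phy_base = cdesc_ring_phys,
 *		.cdesc_size = sizeof(union al_udma_cdesc),
 *		.adapter_rev_id = AL_UDMA_REV_ID_1,
 *	};
 *
 *	int rc = al_udma_q_init(&tx_udma, 0, &qp);
 * @endcode
 */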

/**
 * Reset a udma queue
 *
 * Prior to calling this function make sure:
 * 1. Queue interrupts are masked
 * 2. No additional descriptors are written to the descriptor ring of the queue
 * 3. No completed descriptors are being fetched
 *
 * The queue can be initialized again using 'al_udma_q_init'
 *
 * @param udma_q queue data structure
 *
 * @return 0 if no error found.
 */
int al_udma_q_reset(struct al_udma_q *udma_q);

/**
 * Return (by reference) a pointer to a specific queue data structure.
 * This pointer is needed by functions (e.g. al_udma_desc_action_add) that
 * require it as an input argument.
 *
 * @param udma udma data structure
 * @param qid queue index
 * @param q_handle pointer to the location where the queue structure pointer
 * is written to.
 *
 * @return 0 on success, -EINVAL otherwise.
 */
int al_udma_q_handle_get(struct al_udma *udma, uint32_t qid,
		      struct al_udma_q **q_handle);
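
/*
 * Example (sketch): fetching the queue handle that the fast-path helpers
 * below operate on.
 *
 * @code
 *	struct al_udma_q *udma_q;
 *
 *	if (al_udma_q_handle_get(&tx_udma, 0, &udma_q))
 *		return -EINVAL;
 * @endcode
 */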

/**
 * Change the UDMA's state
 *
 * @param udma udma data structure
 * @param state the target state
 *
 * @return 0
 */
int al_udma_state_set(struct al_udma *udma, enum al_udma_state state);

/**
 * Return the current UDMA hardware state
 *
 * @param udma udma handle
 *
 * @return the UDMA state as reported by the hardware.
 */
enum al_udma_state al_udma_state_get(struct al_udma *udma);
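
/*
 * Example (sketch only; whether polling is needed here is an assumption, not
 * documented behavior): moving the engine to its operational state after
 * initialization.
 *
 * @code
 *	al_udma_state_set(&tx_udma, UDMA_NORMAL);
 *	while (al_udma_state_get(&tx_udma) != UDMA_NORMAL)
 *		;	// spin; a real driver would bound this with a timeout
 * @endcode
 */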

/*
 * Action handling
 */

/**
 * Get the number of descriptors that can be submitted to the udma.
 * One descriptor is kept unused to simplify full/empty management.
 *
 * @param udma_q queue handle
 *
 * @return number of free descriptors.
 */
static INLINE uint32_t al_udma_available_get(struct al_udma_q *udma_q)
{
	uint16_t tmp = udma_q->next_cdesc_idx - (udma_q->next_desc_idx + 1);
	tmp &= udma_q->size_mask;

	return (uint32_t) tmp;
}

/**
 * Check whether the queue has pending descriptors
 *
 * @param udma_q queue handle
 *
 * @return AL_TRUE if no submitted descriptors are pending completion (with
 * ack), i.e. the queue is empty. AL_FALSE otherwise.
 */
static INLINE al_bool al_udma_is_empty(struct al_udma_q *udma_q)
{
	if (((udma_q->next_cdesc_idx - udma_q->next_desc_idx) &
	     udma_q->size_mask) == 0)
		return AL_TRUE;

	return AL_FALSE;
}

/**
 * Get the next available submission descriptor
 *
 * @param udma_q queue handle
 *
 * @return pointer to the next available descriptor
 */
static INLINE union al_udma_desc *al_udma_desc_get(struct al_udma_q *udma_q)
{
	union al_udma_desc *desc;
	uint16_t next_desc_idx;

	al_assert(udma_q);

	next_desc_idx = udma_q->next_desc_idx;
	desc = udma_q->desc_base_ptr + next_desc_idx;

	next_desc_idx++;

	/* if reached end of queue, wrap around */
	udma_q->next_desc_idx = next_desc_idx & udma_q->size_mask;

	return desc;
}

/**
 * Get the ring id for the last allocated descriptor.
 * This function must be called each time a new descriptor is allocated
 * by al_udma_desc_get(), unless the ring id is ignored.
 *
 * @param udma_q queue handle
 *
 * @return ring id for the last allocated descriptor
 */
static INLINE uint32_t al_udma_ring_id_get(struct al_udma_q *udma_q)
{
	uint32_t ring_id;

	al_assert(udma_q);

	ring_id = udma_q->desc_ring_id;

	/* calculate the ring id of the next desc */
	/* if next_desc points to first desc, then queue wrapped around */
	if (unlikely(udma_q->next_desc_idx == 0))
		udma_q->desc_ring_id = (udma_q->desc_ring_id + 1) &
			DMA_RING_ID_MASK;
	return ring_id;
}

/* add DMA action - trigger the engine */
/**
 * Add num descriptors to the submission queue.
 *
 * @param udma_q queue handle
 * @param num number of descriptors to add to the queue's ring.
 *
 * @return 0
 */
static INLINE int al_udma_desc_action_add(struct al_udma_q *udma_q,
					  uint32_t num)
{
	uint32_t *addr;

	al_assert(udma_q);
	al_assert((num > 0) && (num <= udma_q->size));

	addr = &udma_q->q_regs->rings.drtp_inc;
	/*
	 * Make sure data written to the descriptors will be visible to the
	 * DMA.
	 */
	al_local_data_memory_barrier();

	/*
	 * As we explicitly invoke the synchronization function
	 * (al_local_data_memory_barrier()), we can use the relaxed version.
	 */
	al_reg_write32_relaxed(addr, num);

	return 0;
}
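
/*
 * Example (sketch): a complete single-descriptor submission using the helpers
 * above. 'buf_phys_addr' and 'buf_len' are hypothetical placeholders for a
 * buffer already mapped for DMA.
 *
 * @code
 *	union al_udma_desc *desc;
 *	uint32_t flags;
 *
 *	if (al_udma_available_get(udma_q) < 1)
 *		return -ENOSPC;	// ring full, try again later
 *
 *	desc = al_udma_desc_get(udma_q);
 *	flags = al_udma_ring_id_get(udma_q) << AL_M2S_DESC_RING_ID_SHIFT;
 *	flags |= AL_M2S_DESC_FIRST | AL_M2S_DESC_LAST | AL_M2S_DESC_INT_EN;
 *	desc->tx.len_ctrl = flags | (buf_len & AL_M2S_DESC_LEN_MASK);
 *	desc->tx.meta_ctrl = 0;
 *	desc->tx.buf_ptr = (uint64_t)buf_phys_addr;
 *
 *	al_udma_desc_action_add(udma_q, 1);	// doorbell: hand 1 desc to HW
 * @endcode
 */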

#define cdesc_is_first(flags) ((flags) & AL_UDMA_CDESC_FIRST)
#define cdesc_is_last(flags) ((flags) & AL_UDMA_CDESC_LAST)

/**
 * Return a pointer to the descriptor at cdesc + offset, wrapping around when
 * needed.
 *
 * @param udma_q queue handle
 * @param cdesc current completion descriptor
 * @param offset offset in descriptors
 *
 * @return pointer to the completion descriptor at the given offset
 */
static INLINE volatile union al_udma_cdesc *al_cdesc_next(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	*cdesc,
	uint32_t			offset)
{
	volatile uint8_t *tmp = (volatile uint8_t *) cdesc + offset * udma_q->cdesc_size;
	al_assert(udma_q);
	al_assert(cdesc);

	/* if wrap around */
	if (unlikely((tmp > udma_q->end_cdesc_ptr)))
		return (union al_udma_cdesc *)
			(udma_q->cdesc_base_ptr +
			(tmp - udma_q->end_cdesc_ptr - udma_q->cdesc_size));

	return (volatile union al_udma_cdesc *) tmp;
}

/**
 * Check whether the descriptor flags indicate a new descriptor.
 * The function compares the ring id taken from the descriptor flags with the
 * current completion ring id of the queue to determine whether the descriptor
 * is a new one.
 *
 * @param udma_q queue handle
 * @param flags the flags of the completion descriptor
 *
 * @return AL_TRUE if the completion descriptor is a new one,
 *	AL_FALSE if it is an old one.
 */
static INLINE al_bool al_udma_new_cdesc(struct al_udma_q *udma_q,
								uint32_t flags)
{
	if (((flags & AL_M2S_DESC_RING_ID_MASK) >> AL_M2S_DESC_RING_ID_SHIFT)
	    == udma_q->comp_ring_id)
		return AL_TRUE;
	return AL_FALSE;
}

/**
 * Get the next completion descriptor.
 * This function also increments the completion ring id when the ring wraps
 * around.
 *
 * @param udma_q queue handle
 * @param cdesc current completion descriptor
 *
 * @return pointer to the completion descriptor that follows the one pointed by
 * cdesc
 */
static INLINE volatile union al_udma_cdesc *al_cdesc_next_update(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	*cdesc)
{
	/* if last desc, wrap around */
	if (unlikely(((volatile uint8_t *) cdesc == udma_q->end_cdesc_ptr))) {
		udma_q->comp_ring_id =
		    (udma_q->comp_ring_id + 1) & DMA_RING_ID_MASK;
		return (union al_udma_cdesc *) udma_q->cdesc_base_ptr;
	}
	return (volatile union al_udma_cdesc *) ((volatile uint8_t *) cdesc + udma_q->cdesc_size);
}

/**
 * Get the next completed packet from the completion ring of the queue.
 *
 * @param udma_q udma queue handle
 * @param desc pointer set by this function to the packet's first descriptor;
 * note: desc is valid only when the return value is not zero
 * @return number of descriptors that belong to the packet. 0 means no
 * completed full packet was found.
 * If the descriptors found in the completion queue don't form a full packet
 * (no descriptor with the LAST flag), then this function will:
 * (1) save the number of processed descriptors.
 * (2) save the last processed descriptor, so the next time it is called it
 *     will resume from there.
 * (3) return 0.
 * note: the descriptors that belong to the completed packet are still
 * considered in use, meaning the upper layer is safe to access those
 * descriptors when this function returns. al_udma_cdesc_ack() should be
 * called to inform the udma driver that those descriptors are freed.
 */
uint32_t al_udma_cdesc_packet_get(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**desc);
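
/*
 * Example (sketch): polling for completed packets and releasing their
 * descriptors back to the ring.
 *
 * @code
 *	volatile union al_udma_cdesc *cdesc;
 *	uint32_t ndescs;
 *
 *	while ((ndescs = al_udma_cdesc_packet_get(udma_q, &cdesc)) != 0) {
 *		// a real driver would unmap/free the packet's buffers here
 *		al_udma_cdesc_ack(udma_q, ndescs);
 *	}
 * @endcode
 */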

/** get completion descriptor pointer from its index */
#define al_udma_cdesc_idx_to_ptr(udma_q, idx)				\
	((volatile union al_udma_cdesc *) ((udma_q)->cdesc_base_ptr +	\
				(idx) * (udma_q)->cdesc_size))

/**
 * Return the number of all completed descriptors in the completion ring.
 *
 * @param udma_q udma queue handle
 * @param cdesc pointer set by this function to the first descriptor;
 * note: cdesc is valid only when the return value is not zero;
 * pass NULL if not interested
 * @return number of descriptors. 0 means no completed descriptors were found.
 * note: the descriptors that belong to the completed packet are still
 * considered in use, meaning the upper layer is safe to access those
 * descriptors when this function returns. al_udma_cdesc_ack() should be
 * called to inform the udma driver that those descriptors are freed.
 */
static INLINE uint32_t al_udma_cdesc_get_all(
	struct al_udma_q		*udma_q,
	volatile union al_udma_cdesc	**cdesc)
{
	uint16_t count = 0;

	al_assert(udma_q);

	udma_q->comp_head_idx = (uint16_t)
				(al_reg_read32(&udma_q->q_regs->rings.crhp) &
						0xFFFF);

	count = (udma_q->comp_head_idx - udma_q->next_cdesc_idx) &
		udma_q->size_mask;

	if (cdesc)
		*cdesc = al_udma_cdesc_idx_to_ptr(udma_q, udma_q->next_cdesc_idx);

	return (uint32_t)count;
}
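
/*
 * Example (sketch): the non-packet variant: drain whatever has completed,
 * regardless of packet boundaries, then acknowledge it in one go.
 *
 * @code
 *	volatile union al_udma_cdesc *cdesc;
 *	uint32_t done = al_udma_cdesc_get_all(udma_q, &cdesc);
 *
 *	if (done) {
 *		// process 'done' completion descriptors starting at 'cdesc'
 *		al_udma_cdesc_ack(udma_q, done);
 *	}
 * @endcode
 */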

/**
 * Inform the driver that the upper layer has finished processing completion
 * descriptors.
 *
 * @param udma_q udma queue handle
 * @param num number of descriptors to acknowledge
 *
 * @return 0
 */
static INLINE int al_udma_cdesc_ack(struct al_udma_q *udma_q, uint32_t num)
{
	al_assert(udma_q);

	udma_q->next_cdesc_idx += num;
	udma_q->next_cdesc_idx &= udma_q->size_mask;

	return 0;
}

/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */

#endif /* __AL_HAL_UDMA_H__ */
/** @} end of UDMA group */