/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/**
 * @defgroup group_udma_config UDMA Config
 * @ingroup group_udma_api
 *  UDMA Config API
 *  @{
 * @file   al_hal_udma_config.h
 *
 * @brief C Header file for the Universal DMA HAL driver for configuration APIs
 *
 */

#ifndef __AL_HAL_UDMA_CONFIG_H__
#define __AL_HAL_UDMA_CONFIG_H__

#include <al_hal_udma.h>


/* *INDENT-OFF* */
#ifdef __cplusplus
extern "C" {
#endif
/* *INDENT-ON* */

/** Scheduling mode */
enum al_udma_sch_mode {
	STRICT,			/* Strict */
	SRR,			/* Simple Round Robin */
	DWRR			/* Deficit Weighted Round Robin */
};

/** AXI configuration */
struct al_udma_axi_conf {
	uint32_t axi_timeout;	/* Timeout for AXI transactions */
	uint8_t arb_promotion;	/* arbitration promotion */
	al_bool swap_8_bytes;	/* enable 8 bytes swap instead of 4 bytes */
	al_bool swap_s2m_data;
	al_bool swap_s2m_desc;
	al_bool swap_m2s_data;
	al_bool swap_m2s_desc;
};

/** UDMA AXI sub-master configuration (used by both M2S and S2M) */
struct al_udma_axi_submaster {
	uint8_t id; /* AXI ID */
	uint8_t cache_type;
	uint8_t burst;
	uint16_t used_ext;
	uint8_t bus_size;
	uint8_t qos;
	uint8_t prot;
	uint8_t max_beats;
};

/** UDMA AXI M2S configuration */
struct al_udma_m2s_axi_conf {
	struct al_udma_axi_submaster comp_write;
	struct al_udma_axi_submaster data_read;
	struct al_udma_axi_submaster desc_read;
	al_bool break_on_max_boundary; /* Data read break on max boundary */
	uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
	uint8_t ostand_max_data_read;
	uint8_t ostand_max_desc_read;
	uint8_t ostand_max_comp_req;
	uint8_t ostand_max_comp_write;
};

/** UDMA AXI S2M configuration */
struct al_udma_s2m_axi_conf {
	struct al_udma_axi_submaster data_write;
	struct al_udma_axi_submaster desc_read;
	struct al_udma_axi_submaster comp_write;
	al_bool break_on_max_boundary; /* Data read break on max boundary */
	uint8_t min_axi_beats; /* Minimum burst for writing completion desc. */
	uint8_t ostand_max_data_req;
	uint8_t ostand_max_data_write;
	uint8_t ostand_max_comp_req;
	uint8_t ostand_max_comp_write;
	uint8_t ostand_max_desc_read;
	uint8_t ack_fifo_depth;	/* size of the stream application ack fifo */
};

/** M2S error logging */
struct al_udma_err_log {
	uint32_t error_status;
	uint32_t header[4];
};

/** M2S max packet size configuration */
struct al_udma_m2s_pkt_len_conf {
	uint32_t max_pkt_size;
	al_bool encode_64k_as_zero;
};
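
/*
 * A minimal usage sketch for the packet length configuration (illustrative
 * only; 'tx_udma' and the 64KB value are assumptions made for the example,
 * not recommended settings):
 *
 *	struct al_udma_m2s_pkt_len_conf len_conf;
 *
 *	len_conf.max_pkt_size = 0x10000;
 *	len_conf.encode_64k_as_zero = AL_TRUE;
 *	if (al_udma_m2s_packet_size_cfg_set(&tx_udma, &len_conf) != 0)
 *		...handle error...
 */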

/** M2S Descriptor Prefetch configuration */
struct al_udma_m2s_desc_pref_conf {
	uint8_t desc_fifo_depth;
	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
					 * (either strict or RR) */

	uint8_t max_desc_per_packet;	/* max number of descriptors to
					 * prefetch in one burst (5b) */
	uint8_t pref_thr;
	uint8_t min_burst_above_thr;	/* min burst size when fifo above
					 * pref_thr (4b)
					 */
	uint8_t min_burst_below_thr;	/* min burst size when fifo below
					 * pref_thr (4b)
					 */
	uint8_t max_pkt_limit;		/* maximum number of packets in the data
					 * read FIFO, defined based on header
					 * FIFO size
					 */
	uint16_t data_fifo_depth;	/* maximum number of data beats in the
					 * data read FIFO,
					 * defined based on header FIFO size
					 */
};
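
/*
 * A minimal read-modify-write sketch for the M2S prefetch configuration
 * (illustrative only; 'tx_udma' is assumed to be an initialized M2S UDMA
 * instance and the threshold value is a placeholder):
 *
 *	struct al_udma_m2s_desc_pref_conf pref;
 *	int rc;
 *
 *	rc = al_udma_m2s_pref_get(&tx_udma, &pref);
 *	if (rc == 0) {
 *		pref.sch_mode = DWRR;
 *		pref.pref_thr = 8;
 *		rc = al_udma_m2s_pref_set(&tx_udma, &pref);
 *	}
 */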

/** S2M Descriptor Prefetch configuration */
struct al_udma_s2m_desc_pref_conf {
	uint8_t desc_fifo_depth;
	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
					 * (either strict or RR)
					 */

	al_bool q_promotion;		/* enable promotion */
	al_bool force_promotion;	/* force promotion */
	al_bool en_pref_prediction;	/* enable prefetch prediction */
	uint8_t promotion_th;		/* Threshold for queue promotion */

	uint8_t pref_thr;
	uint8_t min_burst_above_thr;	/* min burst size when fifo above
					 * pref_thr (4b)
					 */
	uint8_t min_burst_below_thr;	/* min burst size when fifo below
					 * pref_thr (4b)
					 */
	uint8_t a_full_thr;		/* almost full threshold */
};

/** S2M Data write configuration */
struct al_udma_s2m_data_write_conf {
	uint16_t data_fifo_depth;	/* maximum number of data beats in the
					 * data write FIFO, defined based on
					 * header FIFO size
					 */
	uint8_t max_pkt_limit;		/* maximum number of packets in the
					 * data write FIFO, defined based on
					 * header FIFO size
					 */
	uint8_t fifo_margin;
	uint32_t desc_wait_timer;	/* waiting time for the host to write
					 * a new descriptor to the queue
					 * (for the current packet in process)
					 */
	uint32_t flags;			/* bitwise OR of flags for the S2M
					 * data_cfg_2 register
					 */
};

/** S2M Completion configuration */
struct al_udma_s2m_completion_conf {
	uint8_t desc_size;		/* Size of completion descriptor
					 * in words
					 */
	al_bool cnt_words;		/* Completion fifo in-use counter unit:
					 * AL_TRUE - words, AL_FALSE - descriptors
					 */
	al_bool q_promotion;		/* Enable promotion of the current
					 * unack in progress in the completion
					 * write scheduler
					 */
	al_bool force_rr;		/* force RR arbitration in the
					 * scheduler
					 */
//	uint8_t ack_fifo_depth;		/* size of the stream application ack fifo */
	uint8_t q_free_min;		/* minimum number of free completion
					 * entries required to qualify for
					 * promotion
					 */

	uint16_t comp_fifo_depth;	/* Size of completion fifo in words */
	uint16_t unack_fifo_depth;	/* Size of unacked fifo in descs */
	uint32_t timeout;		/* Ack timeout from stream interface */
};
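
/*
 * A minimal S2M completion configuration sketch (illustrative only; 'rx_udma'
 * is assumed to be an initialized S2M UDMA instance and all numeric values
 * are placeholders, not reset or recommended values):
 *
 *	struct al_udma_s2m_completion_conf comp_conf = { 0 };
 *
 *	comp_conf.desc_size = 2;
 *	comp_conf.cnt_words = AL_FALSE;
 *	comp_conf.force_rr = AL_TRUE;
 *	comp_conf.timeout = 1000;
 *	if (al_udma_s2m_completion_set(&rx_udma, &comp_conf) != 0)
 *		...handle error...
 */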

/** M2S UDMA DWRR configuration */
struct al_udma_m2s_dwrr_conf {
	al_bool enable_dwrr;
	uint8_t inc_factor;
	uint8_t weight;
	al_bool pkt_mode;
	uint32_t deficit_init_val;
};

/** M2S DMA Rate Limitation mode */
struct al_udma_m2s_rlimit_mode {
	al_bool pkt_mode_en;
	uint16_t short_cycle_sz;
	uint32_t token_init_val;
};

/** M2S Stream/Q Rate Limitation */
struct al_udma_m2s_rlimit_cfg {
	uint32_t max_burst_sz;	/* maximum number of accumulated bytes in the
				 * token counter
				 */
	uint16_t long_cycle_sz;	/* number of short cycles between token fills */
	uint32_t long_cycle;	/* number of bits to add in each long cycle */
	uint32_t short_cycle;	/* number of bits to add in each short cycle */
	uint32_t mask;		/* mask the different types of rate limiters */
};

enum al_udma_m2s_rlimit_action {
	AL_UDMA_STRM_RLIMIT_ENABLE,
	AL_UDMA_STRM_RLIMIT_PAUSE,
	AL_UDMA_STRM_RLIMIT_RESET
};
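
/*
 * A minimal stream rate-limit sketch (illustrative only; 'tx_udma' is assumed
 * to be an initialized M2S UDMA instance and the cycle/token values are
 * placeholders that must really be derived from the target rate and clock):
 *
 *	struct al_udma_m2s_rlimit_cfg rl = { 0 };
 *	int rc;
 *
 *	rl.max_burst_sz = 0x8000;
 *	rl.long_cycle_sz = 4;
 *	rl.long_cycle = 64;
 *	rl.short_cycle = 16;
 *
 *	rc = al_udma_m2s_strm_rlimit_set(&tx_udma, &rl);
 *	if (rc == 0)
 *		rc = al_udma_m2s_strm_rlimit_act(&tx_udma,
 *						 AL_UDMA_STRM_RLIMIT_ENABLE);
 */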

/** M2S UDMA Q scheduling configuration */
struct al_udma_m2s_q_dwrr_conf {
	uint32_t max_deficit_cnt_sz;	/* maximum number of accumulated bytes
					 * in the deficit counter
					 */
	al_bool strict;		/* bypass DWRR */
	uint8_t axi_qos;
	uint16_t q_qos;
	uint8_t weight;
};

/** M2S UDMA / UDMA Q scheduling configuration */
struct al_udma_m2s_sc {
	enum al_udma_sch_mode sch_mode;	/* Scheduling Mode */
	struct al_udma_m2s_dwrr_conf dwrr;	/* DWRR configuration */
};

/** UDMA / UDMA Q rate limitation configuration */
struct al_udma_m2s_rlimit {
	struct al_udma_m2s_rlimit_mode rlimit_mode;
						/* rate limitation enablers */
#if 0
	struct al_udma_tkn_bkt_conf token_bkt;	/* Token Bucket configuration */
#endif
};

/** UDMA Data read configuration */
struct al_udma_m2s_data_rd_conf {
	uint8_t max_rd_d_beats;		/* max burst size for reading data
					 * (in AXI beats-128b) (5b)
					 */
	uint8_t max_rd_d_out_req;	/* max number of outstanding data
					 * read requests (6b)
					 */
	uint16_t max_rd_d_out_beats;	/* max num. of data read beats (10b) */
};

/** M2S UDMA completion and application timeouts */
struct al_udma_m2s_comp_timeouts {
	enum al_udma_sch_mode sch_mode;	/* Scheduling mode
					 * (either strict or RR)
					 */
	al_bool enable_q_promotion;
	uint8_t unack_fifo_depth;	/* unacked desc fifo size */
	uint8_t comp_fifo_depth;	/* desc fifo size */
	uint32_t coal_timeout;	/* (24b) */
	uint32_t app_timeout;	/* (24b) */
};

/** S2M UDMA per queue completion configuration */
struct al_udma_s2m_q_comp_conf {
	al_bool dis_comp_coal;		/* disable completion coalescing */
	al_bool en_comp_ring_update;	/* enable writing completion descs */
	uint32_t comp_timer;		/* completion coalescing timer */
	al_bool en_hdr_split;		/* enable header split */
	al_bool force_hdr_split;	/* force header split */
	uint16_t hdr_split_size;	/* size used for the header split */
	uint8_t q_qos;			/* queue QoS */
};
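
/*
 * A minimal per-queue completion sketch (illustrative only; 'rx_q' is assumed
 * to be an initialized S2M UDMA queue, and the coalescing timer and header
 * split size are placeholders):
 *
 *	struct al_udma_s2m_q_comp_conf qc = { 0 };
 *
 *	qc.en_comp_ring_update = AL_TRUE;
 *	qc.dis_comp_coal = AL_FALSE;
 *	qc.comp_timer = 200;
 *	qc.en_hdr_split = AL_TRUE;
 *	qc.hdr_split_size = 128;
 *	if (al_udma_s2m_q_comp_set(&rx_q, &qc) != 0)
 *		...handle error...
 */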

/** UDMA per queue VMID control configuration */
struct al_udma_gen_vmid_q_conf {
	/* Enable usage of the VMID per queue according to 'vmid' */
	al_bool queue_en;

	/* Enable usage of the VMID from the descriptor buffer address 63:48 */
	al_bool desc_en;

	/* VMID to be applied when 'queue_en' is asserted */
	uint16_t vmid;

	/* VMADDR to be applied to msbs when 'desc_en' is asserted.
	 * Relevant for revisions >= AL_UDMA_REV_ID_REV2 */
	uint16_t vmaddr;
};

/** UDMA VMID control configuration */
struct al_udma_gen_vmid_conf {
	/* TX queue configuration */
	struct al_udma_gen_vmid_q_conf tx_q_conf[DMA_MAX_Q];

	/* RX queue configuration */
	struct al_udma_gen_vmid_q_conf rx_q_conf[DMA_MAX_Q];
};
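
/*
 * A minimal VMID assignment sketch (illustrative only; 'regs' is assumed to
 * point to the UDMA unit registers, only queue 0 is configured here and the
 * VMID value 5 is a placeholder; all other queues stay zeroed/disabled):
 *
 *	struct al_udma_gen_vmid_conf vmid_conf = { 0 };
 *
 *	vmid_conf.tx_q_conf[0].queue_en = AL_TRUE;
 *	vmid_conf.tx_q_conf[0].vmid = 5;
 *	vmid_conf.rx_q_conf[0].queue_en = AL_TRUE;
 *	vmid_conf.rx_q_conf[0].vmid = 5;
 *	al_udma_gen_vmid_conf_set(regs, &vmid_conf);
 */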

/** UDMA VMID MSIX control configuration */
struct al_udma_gen_vmid_msix_conf {
	/* Enable write to all VMID_n registers in the MSI-X Controller */
	al_bool access_en;

	/* use VMID_n [7:0] from MSI-X Controller for MSI-X message */
	al_bool sel;
};

/** UDMA per Tx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_tx_q_conf {
	/**********************************************************************
	 * Tx Data VMID
	 **********************************************************************/
	/* Tx data VMID enable */
	al_bool tx_q_data_vmid_en;

	/*
	 * For Tx data reads, replacement bits for the original address.
	 * The number of bits replaced is determined according to
	 * 'tx_q_addr_hi_sel'
	 */
	unsigned int tx_q_addr_hi;

	/*
	 * For Tx data reads, a 6-bit value giving the number of address bits
	 * taken from 'tx_q_addr_hi' instead of the original address field.
	 * When 'tx_q_addr_hi_sel'=32 all of 'tx_q_addr_hi' is taken.
	 * When 'tx_q_addr_hi_sel'=0 none of it is taken, and for any value in
	 * between, replacement starts from the MSB and sweeps down as many
	 * bits as needed. For example, if 'tx_q_addr_hi_sel'=8, the final
	 * address [63:56] carries 'tx_q_addr_hi'[31:24] while [55:32] carries
	 * the original buffer address [55:32].
	 */
	unsigned int tx_q_addr_hi_sel;

	/*
	 * Tx data read VMID
	 * Masked per bit with 'tx_q_data_vmid_mask'
	 */
	unsigned int tx_q_data_vmid;

	/*
	 * Tx data read VMID mask
	 * Each '1' selects from the buffer address, each '0' selects from
	 * 'tx_q_data_vmid'
	 */
	unsigned int tx_q_data_vmid_mask;

	/**********************************************************************
	 * Tx prefetch VMID
	 **********************************************************************/
	/* Tx prefetch VMID enable */
	al_bool tx_q_prefetch_vmid_en;

	/* Tx prefetch VMID */
	unsigned int tx_q_prefetch_vmid;

	/**********************************************************************
	 * Tx completion VMID
	 **********************************************************************/
	/* Tx completion VMID enable */
	al_bool tx_q_compl_vmid_en;

	/* Tx completion VMID */
	unsigned int tx_q_compl_vmid;
};

/** UDMA per Rx queue advanced VMID control configuration */
struct al_udma_gen_vmid_advanced_rx_q_conf {
	/**********************************************************************
	 * Rx Data VMID
	 **********************************************************************/
	/* Rx data VMID enable */
	al_bool rx_q_data_vmid_en;

	/*
	 * For Rx data writes, replacement bits for the original address.
	 * The number of bits replaced is determined according to
	 * 'rx_q_addr_hi_sel'
	 */
	unsigned int rx_q_addr_hi;

	/*
	 * For Rx data writes, a 6-bit value giving the number of address bits
	 * taken from 'rx_q_addr_hi' instead of the original address field.
	 */
	unsigned int rx_q_addr_hi_sel;

	/*
	 * Rx data write VMID
	 * Masked per bit with 'rx_q_data_vmid_mask'
	 */
	unsigned int rx_q_data_vmid;

	/* Rx data write VMID mask */
	unsigned int rx_q_data_vmid_mask;

	/**********************************************************************
	 * Rx Data Buffer 2 VMID
	 **********************************************************************/
	/* Rx data buff2 VMID enable */
	al_bool rx_q_data_buff2_vmid_en;

	/*
	 * For Rx data buff2 writes, replacement bits for the original address.
	 * The number of bits replaced is determined according to
	 * 'rx_q_data_buff2_addr_hi_sel'
	 */
	unsigned int rx_q_data_buff2_addr_hi;

	/*
	 * For Rx data buff2 writes, a 6-bit value giving the number of address
	 * bits taken from 'rx_q_data_buff2_addr_hi' instead of the original
	 * address field.
	 */
	unsigned int rx_q_data_buff2_addr_hi_sel;

	/*
	 * Rx data buff2 write VMID
	 * Masked per bit with 'rx_q_data_buff2_mask'
	 */
	unsigned int rx_q_data_buff2_vmid;

	/* Rx data buff2 write VMID mask */
	unsigned int rx_q_data_buff2_mask;

	/**********************************************************************
	 * Rx DDP VMID
	 **********************************************************************/
	/* Rx DDP write VMID enable */
	al_bool rx_q_ddp_vmid_en;

	/*
	 * For Rx DDP writes, replacement bits for the original address.
	 * The number of bits replaced is determined according to
	 * 'rx_q_ddp_addr_hi_sel'
	 */
	unsigned int rx_q_ddp_addr_hi;

	/*
	 * For Rx DDP writes, a 6-bit value giving the number of address bits
	 * taken from 'rx_q_ddp_addr_hi' instead of the original address field.
	 */
	unsigned int rx_q_ddp_addr_hi_sel;

	/*
	 * Rx DDP write VMID
	 * Masked per bit with 'rx_q_ddp_mask'
	 */
	unsigned int rx_q_ddp_vmid;

	/* Rx DDP write VMID mask */
	unsigned int rx_q_ddp_mask;

	/**********************************************************************
	 * Rx prefetch VMID
	 **********************************************************************/
	/* Rx prefetch VMID enable */
	al_bool rx_q_prefetch_vmid_en;

	/* Rx prefetch VMID */
	unsigned int rx_q_prefetch_vmid;

	/**********************************************************************
	 * Rx completion VMID
	 **********************************************************************/
	/* Rx completion VMID enable */
	al_bool rx_q_compl_vmid_en;

	/* Rx completion VMID */
	unsigned int rx_q_compl_vmid;
};

/**
 * Header split, buffer 2 per queue configuration
 * When header split is enabled, Buffer_2 is used as the address for the header
 * data. Buffer_2 is defined as 32 bits in the Rx descriptor, and the MSBs
 * ([63:32]) of Buffer_1 are used as address bits [63:32] of the header
 * address.
 */
struct al_udma_gen_hdr_split_buff2_q_conf {
	/*
	 * MSB of the 64-bit address (bits [63:32]) that can be used for header
	 * split for this queue
	 */
	unsigned int addr_msb;

	/*
	 * Determine how to select the MSB (bits [63:32]) of the address when
	 * header split is enabled (4 bits, one per byte)
	 * - Bits [3:0]:
	 *	[0] - selector for bits [39:32]
	 *	[1] - selector for bits [47:40]
	 *	[2] - selector for bits [55:48]
	 *	[3] - selector for bits [63:56]
	 * - Bit value:
	 *	0 - Use Buffer_1 (legacy operation)
	 *	1 - Use the queue configuration 'addr_msb'
	 */
	unsigned int add_msb_sel;
};
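
/*
 * A minimal header split buffer 2 sketch (illustrative only; 'rx_q' is
 * assumed to be an initialized Rx/S2M UDMA queue, 0x000000ab is a placeholder
 * for bits [63:32] of the header buffer region, and all four selector bits
 * are set so the MSBs are always taken from 'addr_msb'):
 *
 *	struct al_udma_gen_hdr_split_buff2_q_conf hs = { 0 };
 *
 *	hs.addr_msb = 0x000000ab;
 *	hs.add_msb_sel = 0xf;
 *	al_udma_gen_hdr_split_buff2_rx_q_conf(&rx_q, &hs);
 */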

/* Report Error - to be used for abort */
void al_udma_err_report(struct al_udma *udma);

/* Statistics - TBD */
void al_udma_stats_get(struct al_udma *udma);

/* Misc configurations */
/* Configure AXI configuration */
int al_udma_axi_set(struct udma_gen_axi *axi_regs,
		    struct al_udma_axi_conf *axi);

/* Configure UDMA AXI M2S configuration */
int al_udma_m2s_axi_set(struct al_udma *udma,
			struct al_udma_m2s_axi_conf *axi_m2s);

/* Configure UDMA AXI S2M configuration */
int al_udma_s2m_axi_set(struct al_udma *udma,
			struct al_udma_s2m_axi_conf *axi_s2m);

/* Configure M2S packet len */
int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
				    struct al_udma_m2s_pkt_len_conf *conf);

/* Configure M2S UDMA descriptor prefetch */
int al_udma_m2s_pref_set(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);
int al_udma_m2s_pref_get(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);

/* set m2s packet's max descriptors (including meta descriptors) */
#define AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET	31
int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs);

/* set s2m packets' max descriptors */
#define AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET	31
int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs);
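
/*
 * A minimal sketch (illustrative only; 'tx_udma' and 'rx_udma' are assumed to
 * be initialized M2S and S2M UDMA instances; the maximum allowed value is
 * used just as an example):
 *
 *	al_udma_m2s_max_descs_set(&tx_udma,
 *				  AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET);
 *	al_udma_s2m_max_descs_set(&rx_udma,
 *				  AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET);
 */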


/* Configure S2M UDMA descriptor prefetch */
int al_udma_s2m_pref_set(struct al_udma *udma,
			 struct al_udma_s2m_desc_pref_conf *conf);
int al_udma_m2s_pref_get(struct al_udma *udma,
			 struct al_udma_m2s_desc_pref_conf *conf);

/* Configure S2M UDMA data write */
int al_udma_s2m_data_write_set(struct al_udma *udma,
			       struct al_udma_s2m_data_write_conf *conf);

/* Configure the s2m full line write feature */
int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable);

/* Configure S2M UDMA completion */
int al_udma_s2m_completion_set(struct al_udma *udma,
			       struct al_udma_s2m_completion_conf *conf);

/* Configure the M2S UDMA scheduling mode */
int al_udma_m2s_sc_set(struct al_udma *udma,
		       struct al_udma_m2s_dwrr_conf *sched);

/* Configure the M2S UDMA rate limitation */
int al_udma_m2s_rlimit_set(struct al_udma *udma,
			   struct al_udma_m2s_rlimit_mode *mode);
int al_udma_m2s_rlimit_reset(struct al_udma *udma);

/* Configure the M2S Stream rate limitation */
int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
				struct al_udma_m2s_rlimit_cfg *conf);
int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
				enum al_udma_m2s_rlimit_action act);

/* Configure the M2S UDMA Q rate limitation */
int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
			     struct al_udma_m2s_rlimit_cfg *conf);
int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
			     enum al_udma_m2s_rlimit_action act);

/* Configure the M2S UDMA Q scheduling mode */
int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
			 struct al_udma_m2s_q_dwrr_conf *conf);
int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set);
int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q);

/* M2S UDMA completion and application timeouts */
int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
				  struct al_udma_m2s_comp_timeouts *conf);
int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
				  struct al_udma_m2s_comp_timeouts *conf);

/* UDMA get revision */
static INLINE unsigned int al_udma_get_revision(struct unit_regs __iomem *unit_regs)
{
	return (al_reg_read32(&unit_regs->gen.dma_misc.revision)
			& UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_MASK) >>
			UDMA_GEN_DMA_MISC_REVISION_PROGRAMMING_ID_SHIFT;
}
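
/*
 * A minimal sketch of gating configuration on the UDMA revision (illustrative
 * only; 'regs' is assumed to point to the UDMA unit registers, and
 * AL_UDMA_REV_ID_REV2 is the revision constant referenced above for the
 * per-queue VMADDR support):
 *
 *	if (al_udma_get_revision(regs) >= AL_UDMA_REV_ID_REV2) {
 *		... configuration that is only valid on rev 2 and later ...
 *	}
 */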

/**
 * S2M UDMA: configure the expected behavior of the Rx/S2M UDMA when there are
 * no Rx descriptors.
 *
 * @param udma
 * @param drop_packet when set to true, the UDMA will drop the packet.
 * @param gen_interrupt when set to true, the UDMA will generate a
 *        no_desc_hint interrupt when a packet is received and the UDMA
 *	  doesn't find enough free descriptors for it.
 * @param wait_for_desc_timeout timeout in SB cycles to wait for new
 *	  descriptors before dropping the packets.
 *	  Notes:
 *		- The hint interrupt is raised immediately without waiting
 *		for new descs.
 *		- The value 0 means wait forever.
 *
 * Notes:
 * - When gen_interrupt is set, this API won't program the iofic to unmask the
 * interrupt; the caller should take care of unmasking it using the
 * al_udma_iofic_config() API.
 *
 * - The hardware's default configuration is: don't drop the packet, generate
 * the hint interrupt.
 * - This API must be called once, before enabling the UDMA.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet, al_bool gen_interrupt, uint32_t wait_for_desc_timeout);
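
/*
 * A minimal sketch (illustrative only; 'rx_udma' is assumed to be an
 * initialized, not yet enabled, S2M UDMA instance and the timeout is a
 * placeholder in SB cycles). drop_packet = AL_TRUE, gen_interrupt = AL_TRUE,
 * wait_for_desc_timeout = 10000:
 *
 *	int rc;
 *
 *	rc = al_udma_s2m_no_desc_cfg_set(&rx_udma, AL_TRUE, AL_TRUE, 10000);
 *	if (rc != 0)
 *		...handle error...
 */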

/**
 * S2M UDMA: configure a queue's completion update
 *
 * @param udma_q
 * @param enable set to true to enable completion update
 *
 * Completion update should be disabled for Tx queues, as their completion
 * descriptors don't carry useful information; disabling it saves DMA accesses.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable);

/**
 * S2M UDMA: configure a queue's completion descriptors coalescing
 *
 * @param udma_q
 * @param enable set to true to enable completion coalescing
 * @param coal_timeout in South Bridge cycles.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable, uint32_t coal_timeout);
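
/*
 * A minimal sketch (illustrative only; 'rx_q' is assumed to be an initialized
 * S2M UDMA queue and the 500-cycle coalescing timeout is a placeholder):
 * enable completion ring updates on the queue and then turn on completion
 * descriptor coalescing.
 *
 *	int rc;
 *
 *	rc = al_udma_s2m_q_compl_updade_config(&rx_q, AL_TRUE);
 *	if (rc == 0)
 *		rc = al_udma_s2m_q_compl_coal_config(&rx_q, AL_TRUE, 500);
 */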

/**
 * S2M UDMA: configure completion descriptor write burst parameters
 *
 * @param udma
 * @param burst_size completion descriptors write burst size in bytes.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma,
					uint16_t burst_size);

/**
 * S2M UDMA: configure a queue's completion header split
 *
 * @param udma_q
 * @param enable set to true to enable completion header split
 * @param force_hdr_split when set, the header split length is always taken
 *        from the queue configuration
 * @param hdr_len header split length.
 *
 * @return 0 if no error found.
 */
int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q,
					 al_bool enable,
					 al_bool force_hdr_split,
					 uint32_t hdr_len);

/* S2M UDMA per queue completion configuration */
int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
			   struct al_udma_s2m_q_comp_conf *conf);

/** UDMA VMID control configuration */
void al_udma_gen_vmid_conf_set(
	struct unit_regs __iomem	*unit_regs,
	struct al_udma_gen_vmid_conf	*conf);

/** UDMA VMID MSIX control configuration */
void al_udma_gen_vmid_msix_conf_set(
	struct unit_regs __iomem		*unit_regs,
	struct al_udma_gen_vmid_msix_conf	*conf);

/** UDMA VMID control advanced Tx queue configuration */
void al_udma_gen_vmid_advanced_tx_q_conf(
	struct al_udma_q				*q,
	struct al_udma_gen_vmid_advanced_tx_q_conf	*conf);

/** UDMA VMID control advanced Rx queue configuration */
void al_udma_gen_vmid_advanced_rx_q_conf(
	struct al_udma_q				*q,
	struct al_udma_gen_vmid_advanced_rx_q_conf	*conf);

/** UDMA header split buffer 2 Rx queue configuration */
void al_udma_gen_hdr_split_buff2_rx_q_conf(
	struct al_udma_q				*q,
	struct al_udma_gen_hdr_split_buff2_q_conf	*conf);

/* *INDENT-OFF* */
#ifdef __cplusplus
}
#endif
/* *INDENT-ON* */
/** @} end of UDMA config group */
#endif /* __AL_HAL_UDMA_CONFIG_H__ */