/*-
*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/**
 * @file   al_hal_udma_config.c
 *
 * @brief  Universal DMA HAL driver for configurations
 *
 */

#include <al_hal_common.h>
#include <al_hal_udma_regs.h>
#include <al_hal_udma_config.h>

/**************** Misc configurations *********************/
/** Configure AXI generic configuration */
int al_udma_axi_set(struct udma_gen_axi *axi_regs,
					struct al_udma_axi_conf *axi)
{
	uint32_t reg;

	al_reg_write32(&axi_regs->cfg_1, axi->axi_timeout);

	reg = al_reg_read32(&axi_regs->cfg_2);
	reg &= ~UDMA_GEN_AXI_CFG_2_ARB_PROMOTION_MASK;
	reg |= axi->arb_promotion;
	al_reg_write32(&axi_regs->cfg_2, reg);

	reg = al_reg_read32(&axi_regs->endian_cfg);
	if (axi->swap_8_bytes == AL_TRUE)
		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;
	else
		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_64B_EN;

	if (axi->swap_s2m_data == AL_TRUE)
		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;
	else
		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DATA;

	if (axi->swap_s2m_desc == AL_TRUE)
		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;
	else
		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_S2M_DESC;

	if (axi->swap_m2s_data == AL_TRUE)
		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;
	else
		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DATA;

	if (axi->swap_m2s_desc == AL_TRUE)
		reg |= UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;
	else
		reg &= ~UDMA_GEN_AXI_ENDIAN_CFG_SWAP_M2S_DESC;

	al_reg_write32(&axi_regs->endian_cfg, reg);
	return 0;
}
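
/*
 * Usage sketch (illustrative only, not part of the HAL): program the generic
 * AXI settings once during initialization. 'axi_regs' is assumed to point at
 * the UDMA generic AXI register block obtained by the caller, and the numeric
 * values below are placeholders.
 *
 * @code
 *	struct al_udma_axi_conf axi_conf = {
 *		.axi_timeout = 1000,	// placeholder timeout value
 *		.arb_promotion = 0,
 *		.swap_8_bytes = AL_FALSE,
 *		.swap_s2m_data = AL_FALSE,
 *		.swap_s2m_desc = AL_FALSE,
 *		.swap_m2s_data = AL_FALSE,
 *		.swap_m2s_desc = AL_FALSE,
 *	};
 *
 *	al_udma_axi_set(axi_regs, &axi_conf);
 * @endcode
 */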

/** Configure AXI M2S submaster */
static int al_udma_m2s_axi_sm_set(struct al_udma_axi_submaster *m2s_sm,
				      uint32_t *cfg_1, uint32_t *cfg_2,
				      uint32_t *cfg_max_beats)
{
	uint32_t reg;
	reg = al_reg_read32(cfg_1);
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
	reg |= m2s_sm->id & UDMA_AXI_M2S_COMP_WR_CFG_1_AWID_MASK;
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
	reg |= (m2s_sm->cache_type <<
			UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_SHIFT) &
		UDMA_AXI_M2S_COMP_WR_CFG_1_AWCACHE_MASK;
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
	reg |= (m2s_sm->burst << UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_SHIFT) &
		UDMA_AXI_M2S_COMP_WR_CFG_1_AWBURST_MASK;
	al_reg_write32(cfg_1, reg);

	reg = al_reg_read32(cfg_2);
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
	reg |= m2s_sm->used_ext & UDMA_AXI_M2S_COMP_WR_CFG_2_AWUSER_MASK;
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
	reg |= (m2s_sm->bus_size <<
			UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_SHIFT) &
		UDMA_AXI_M2S_COMP_WR_CFG_2_AWSIZE_MASK;
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
	reg |= (m2s_sm->qos << UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_SHIFT) &
		UDMA_AXI_M2S_COMP_WR_CFG_2_AWQOS_MASK;
	reg &= ~UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
	reg |= (m2s_sm->prot << UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_SHIFT) &
		UDMA_AXI_M2S_COMP_WR_CFG_2_AWPROT_MASK;
	al_reg_write32(cfg_2, reg);

	reg = al_reg_read32(cfg_max_beats);
	reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
	reg |= m2s_sm->max_beats &
			UDMA_AXI_M2S_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
	al_reg_write32(cfg_max_beats, reg);

	return 0;
}

/** Configure UDMA AXI M2S configuration */
int al_udma_m2s_axi_set(struct al_udma *udma,
					struct al_udma_m2s_axi_conf *axi_m2s)
{
	uint32_t reg;

	al_udma_m2s_axi_sm_set(&axi_m2s->comp_write,
			       &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_1,
			       &udma->udma_regs->m2s.axi_m2s.comp_wr_cfg_2,
			       &udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);

	al_udma_m2s_axi_sm_set(&axi_m2s->data_read,
			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_1,
			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg_2,
			       &udma->udma_regs->m2s.axi_m2s.data_rd_cfg);

	al_udma_m2s_axi_sm_set(&axi_m2s->desc_read,
			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_1,
			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_2,
			       &udma->udma_regs->m2s.axi_m2s.desc_rd_cfg_3);

	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg);
	if (axi_m2s->break_on_max_boundary == AL_TRUE)
		reg |= UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
	else
		reg &= ~UDMA_AXI_M2S_DATA_RD_CFG_ALWAYS_BREAK_ON_MAX_BOUDRY;
	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.data_rd_cfg, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1);
	reg &= ~UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
	reg |= (axi_m2s->min_axi_beats <<
			UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
		UDMA_AXI_M2S_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.desc_wr_cfg_1, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg);
	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
	reg |= axi_m2s->ostand_max_data_read &
			UDMA_AXI_M2S_OSTAND_CFG_MAX_DATA_RD_MASK;
	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
	reg |= (axi_m2s->ostand_max_desc_read <<
			UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_SHIFT) &
		UDMA_AXI_M2S_OSTAND_CFG_MAX_DESC_RD_MASK;
	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
	reg |= (axi_m2s->ostand_max_comp_req <<
			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_SHIFT) &
		UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_REQ_MASK;
	reg &= ~UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
	reg |= (axi_m2s->ostand_max_comp_write <<
			UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_SHIFT) &
		UDMA_AXI_M2S_OSTAND_CFG_MAX_COMP_DATA_WR_MASK;
	al_reg_write32(&udma->udma_regs->m2s.axi_m2s.ostand_cfg, reg);
	return 0;
}
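
/*
 * Usage sketch (illustrative): fill one AXI submaster descriptor and reuse it
 * for the three M2S masters. The field names are those read by the functions
 * above; all numeric values are placeholders, and 'udma' is assumed to be an
 * initialized handle.
 *
 * @code
 *	struct al_udma_m2s_axi_conf m2s_axi;
 *
 *	m2s_axi.comp_write.id = 0;
 *	m2s_axi.comp_write.cache_type = 0;
 *	m2s_axi.comp_write.burst = 0;		// AWBURST encoding
 *	m2s_axi.comp_write.used_ext = 0;	// AWUSER bits
 *	m2s_axi.comp_write.bus_size = 2;	// AWSIZE encoding
 *	m2s_axi.comp_write.qos = 0;
 *	m2s_axi.comp_write.prot = 2;
 *	m2s_axi.comp_write.max_beats = 4;
 *	m2s_axi.data_read = m2s_axi.comp_write;
 *	m2s_axi.desc_read = m2s_axi.comp_write;
 *	m2s_axi.break_on_max_boundary = AL_FALSE;
 *	m2s_axi.min_axi_beats = 0;
 *	m2s_axi.ostand_max_data_read = 16;
 *	m2s_axi.ostand_max_desc_read = 16;
 *	m2s_axi.ostand_max_comp_req = 16;
 *	m2s_axi.ostand_max_comp_write = 16;
 *
 *	al_udma_m2s_axi_set(udma, &m2s_axi);
 * @endcode
 */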

/** Configure AXI S2M submaster */
static int al_udma_s2m_axi_sm_set(struct al_udma_axi_submaster *s2m_sm,
				      uint32_t *cfg_1, uint32_t *cfg_2,
				      uint32_t *cfg_max_beats)
{
	uint32_t reg;
	reg = al_reg_read32(cfg_1);
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
	reg |= s2m_sm->id & UDMA_AXI_S2M_COMP_WR_CFG_1_AWID_MASK;
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
	reg |= (s2m_sm->cache_type <<
			UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_SHIFT) &
		UDMA_AXI_S2M_COMP_WR_CFG_1_AWCACHE_MASK;
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
	reg |= (s2m_sm->burst << UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_SHIFT) &
		UDMA_AXI_S2M_COMP_WR_CFG_1_AWBURST_MASK;
	al_reg_write32(cfg_1, reg);

	reg = al_reg_read32(cfg_2);
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
	reg |= s2m_sm->used_ext & UDMA_AXI_S2M_COMP_WR_CFG_2_AWUSER_MASK;
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
	reg |= (s2m_sm->bus_size << UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_SHIFT) &
		UDMA_AXI_S2M_COMP_WR_CFG_2_AWSIZE_MASK;
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
	reg |= (s2m_sm->qos << UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_SHIFT) &
		UDMA_AXI_S2M_COMP_WR_CFG_2_AWQOS_MASK;
	reg &= ~UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
	reg |= (s2m_sm->prot << UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_SHIFT) &
		UDMA_AXI_S2M_COMP_WR_CFG_2_AWPROT_MASK;
	al_reg_write32(cfg_2, reg);

	reg = al_reg_read32(cfg_max_beats);
	reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
	reg |= s2m_sm->max_beats &
			UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK;
	al_reg_write32(cfg_max_beats, reg);

	return 0;
}

/** Configure UDMA AXI S2M configuration */
int al_udma_s2m_axi_set(struct al_udma *udma,
				struct al_udma_s2m_axi_conf *axi_s2m)
{
	uint32_t reg;

	al_udma_s2m_axi_sm_set(&axi_s2m->data_write,
			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_1,
			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg_2,
			       &udma->udma_regs->s2m.axi_s2m.data_wr_cfg);

	al_udma_s2m_axi_sm_set(&axi_s2m->desc_read,
			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_4,
			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_5,
			       &udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);

	al_udma_s2m_axi_sm_set(&axi_s2m->comp_write,
			       &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_1,
			       &udma->udma_regs->s2m.axi_s2m.comp_wr_cfg_2,
			       &udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);

	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3);
	if (axi_s2m->break_on_max_boundary == AL_TRUE)
		reg |= UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
	else
		reg &= ~UDMA_AXI_S2M_DESC_RD_CFG_3_ALWAYS_BREAK_ON_MAX_BOUDRY;
	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_rd_cfg_3, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1);
	reg &= ~UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
	reg |= (axi_s2m->min_axi_beats <<
			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT) &
		UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK;
	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd);
	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;
	reg |= axi_s2m->ostand_max_desc_read &
			UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_DESC_RD_OSTAND_MASK;

	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;
	reg |= (axi_s2m->ack_fifo_depth <<
			UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_SHIFT) &
		UDMA_AXI_S2M_OSTAND_CFG_RD_MAX_STREAM_ACK_MASK;

	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_rd, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr);
	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
	reg |= axi_s2m->ostand_max_data_req &
			UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_WR_OSTAND_MASK;
	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
	reg |= (axi_s2m->ostand_max_data_write <<
		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_SHIFT) &
		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_DATA_BEATS_WR_OSTAND_MASK;
	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
	reg |= (axi_s2m->ostand_max_comp_req <<
			UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_SHIFT) &
		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_REQ_MASK;
	reg &= ~UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
	reg |= (axi_s2m->ostand_max_comp_write <<
		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_SHIFT) &
		UDMA_AXI_S2M_OSTAND_CFG_WR_MAX_COMP_DATA_WR_OSTAND_MASK;
	al_reg_write32(&udma->udma_regs->s2m.axi_s2m.ostand_cfg_wr, reg);
	return 0;
}

/** M2S packet len configuration */
int al_udma_m2s_packet_size_cfg_set(struct al_udma *udma,
				struct al_udma_m2s_pkt_len_conf *conf)
{
	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s.cfg_len);
	uint32_t max_supported_size = UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;

	al_assert(udma->type == UDMA_TX);

	if (conf->encode_64k_as_zero == AL_TRUE)
		max_supported_size += 1;	/* 64K */

	if (conf->max_pkt_size > max_supported_size) {
		al_err("udma [%s]: requested max_pkt_size (0x%x) exceeds the "
			"supported limit (0x%x)\n", udma->name,
			conf->max_pkt_size, max_supported_size);
		return -EINVAL;
	}

	reg &= ~UDMA_M2S_CFG_LEN_ENCODE_64K;
	if (conf->encode_64k_as_zero == AL_TRUE)
		reg |= UDMA_M2S_CFG_LEN_ENCODE_64K;

	reg &= ~UDMA_M2S_CFG_LEN_MAX_PKT_SIZE_MASK;
	reg |= conf->max_pkt_size;

	al_reg_write32(&udma->udma_regs->m2s.m2s.cfg_len, reg);
	return 0;
}
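
/*
 * Usage sketch (illustrative): limit the maximum M2S packet size. When
 * encode_64k_as_zero is set, the accepted maximum grows by one (a length of
 * zero then denotes 64K). 'udma' is assumed to be a TX handle; the size
 * below is a placeholder.
 *
 * @code
 *	struct al_udma_m2s_pkt_len_conf len_conf = {
 *		.encode_64k_as_zero = AL_FALSE,
 *		.max_pkt_size = 0x4000,		// placeholder limit
 *	};
 *	int rc = al_udma_m2s_packet_size_cfg_set(udma, &len_conf);
 * @endcode
 */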

/** Report Error - to be used for abort */
void al_udma_err_report(struct al_udma *udma __attribute__((__unused__)))
{
	return;
}

/** Statistics - TBD */
void al_udma_stats_get(struct al_udma *udma __attribute__((__unused__)))
{
	return;
}

/** Configure UDMA M2S descriptor prefetch */
int al_udma_m2s_pref_set(struct al_udma *udma,
				struct al_udma_m2s_desc_pref_conf *conf)
{
	uint32_t reg;

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
	reg |= conf->desc_fifo_depth;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);

	if (conf->sch_mode == SRR)
		reg |= UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
	else if (conf->sch_mode == STRICT)
		reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
	else {
		al_err("udma [%s]: requested descriptor prefetch arbiter "
			"mode (%d) is invalid\n", udma->name, conf->sch_mode);
		return -EINVAL;
	}
	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
	reg |= conf->max_desc_per_packet &
		UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);
	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
	reg |= conf->min_burst_below_thr &
		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;

	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
	reg |= (conf->min_burst_above_thr <<
	       UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
		UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;

	reg &= ~UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
	reg |= (conf->pref_thr <<
			UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
		UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK;

	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.data_cfg);
	reg &= ~UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;
	reg |= conf->data_fifo_depth &
			UDMA_M2S_RD_DATA_CFG_DATA_FIFO_DEPTH_MASK;

	reg &= ~UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
	reg |= (conf->max_pkt_limit
			<< UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_SHIFT) &
		UDMA_M2S_RD_DATA_CFG_MAX_PKT_LIMIT_MASK;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rd.data_cfg, reg);

	return 0;
}

/** Get the M2S UDMA descriptor prefetch */
int al_udma_m2s_pref_get(struct al_udma *udma,
				struct al_udma_m2s_desc_pref_conf *conf)
{
	uint32_t reg;

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_1);
	conf->desc_fifo_depth =
	    AL_REG_FIELD_GET(reg, UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK,
			UDMA_M2S_RD_DESC_PREF_CFG_1_FIFO_DEPTH_SHIFT);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2);
	if (reg & UDMA_M2S_RD_DESC_PREF_CFG_2_PREF_FORCE_RR)
		conf->sch_mode = SRR;
	else
		conf->sch_mode = STRICT;
	conf->max_desc_per_packet =
	    AL_REG_FIELD_GET(reg,
			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
			UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3);

	conf->min_burst_below_thr =
	    AL_REG_FIELD_GET(reg,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_SHIFT);

	conf->min_burst_above_thr =
	    AL_REG_FIELD_GET(reg,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
			UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT);

	conf->pref_thr = AL_REG_FIELD_GET(reg,
				UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK,
				UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT);
	return 0;
}
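
/*
 * Usage sketch (illustrative): the getter enables a read-modify-write flow.
 * Note that it does not retrieve data_fifo_depth and max_pkt_limit, so the
 * caller must still fill those in before calling the setter; the values
 * below are placeholders.
 *
 * @code
 *	struct al_udma_m2s_desc_pref_conf pref;
 *
 *	al_udma_m2s_pref_get(udma, &pref);
 *	pref.pref_thr = 8;		// change only the threshold
 *	pref.data_fifo_depth = 512;	// placeholder, not read back
 *	pref.max_pkt_limit = 4;		// placeholder, not read back
 *	al_udma_m2s_pref_set(udma, &pref);
 * @endcode
 */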

/* set max descriptors */
int al_udma_m2s_max_descs_set(struct al_udma *udma, uint8_t max_descs)
{
	uint32_t pref_thr = max_descs;
	uint32_t min_burst_above_thr = 4;
	al_assert(max_descs <= AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET);
	al_assert(max_descs > 0);

	/* increase min_burst_above_thr so a larger burst can be used to
	 * fetch descriptors */
	if (pref_thr >= 8)
		min_burst_above_thr = 8;
	else {
		/* don't set the prefetch threshold too low, so that
		 * min_burst_above_thr >= 4 can be kept */
		pref_thr = 4;
	}

	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_2,
			      UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_MASK,
			      max_descs << UDMA_M2S_RD_DESC_PREF_CFG_2_MAX_DESC_PER_PKT_SHIFT);

	al_reg_write32_masked(&udma->udma_regs->m2s.m2s_rd.desc_pref_cfg_3,
			      UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
			      UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
			      (pref_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
			      (min_burst_above_thr << UDMA_M2S_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));

	return 0;
}
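
/*
 * Usage sketch (illustrative): cap the descriptors fetched per packet; the
 * helper above derives matching prefetch-threshold and burst settings. The
 * value must stay within AL_UDMA_M2S_MAX_ALLOWED_DESCS_PER_PACKET.
 *
 * @code
 *	al_udma_m2s_max_descs_set(udma, 8);
 * @endcode
 */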

/* set s2m max descriptors */
int al_udma_s2m_max_descs_set(struct al_udma *udma, uint8_t max_descs)
{
	uint32_t pref_thr = max_descs;
	uint32_t min_burst_above_thr = 4;
	al_assert(max_descs <= AL_UDMA_S2M_MAX_ALLOWED_DESCS_PER_PACKET);
	al_assert(max_descs > 0);

	/* increase min_burst_above_thr so a larger burst can be used to
	 * fetch descriptors */
	if (pref_thr >= 8)
		min_burst_above_thr = 8;
	else {
		/* don't set the prefetch threshold too low, so that
		 * min_burst_above_thr >= 4 can be kept */
		pref_thr = 4;
	}

	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3,
			      UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK |
			      UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK,
			      (pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) |
			      (min_burst_above_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT));

	return 0;
}

int al_udma_s2m_full_line_write_set(struct al_udma *udma, al_bool enable)
{
	uint32_t	val = 0;

	if (enable == AL_TRUE) {
		val = UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE;
		al_info("udma [%s]: full line write enabled\n", udma->name);
	}

	al_reg_write32_masked(&udma->udma_regs->s2m.s2m_wr.data_cfg_2,
			UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE,
			val);
	return 0;
}

/** Configure S2M UDMA descriptor prefetch */
int al_udma_s2m_pref_set(struct al_udma *udma,
				struct al_udma_s2m_desc_pref_conf *conf)
{
	uint32_t reg;

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1);
	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_1_FIFO_DEPTH_MASK;
	reg |= conf->desc_fifo_depth;
	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_1, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2);

	if (conf->sch_mode == SRR)
		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
	else if (conf->sch_mode == STRICT)
		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PREF_FORCE_RR;
	else {
		al_err("udma [%s]: requested descriptor prefetch arbiter "
			"mode (%d) is invalid\n", udma->name, conf->sch_mode);
		return -EINVAL;
	}
	if (conf->q_promotion == AL_TRUE)
		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;
	else
		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_Q_PROMOTION;

	if (conf->force_promotion == AL_TRUE)
		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;
	else
		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_FORCE_PROMOTION;

	if (conf->en_pref_prediction == AL_TRUE)
		reg |= UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;
	else
		reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_EN_PREF_PREDICTION;

	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;
	reg |= (conf->promotion_th
			<< UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_SHIFT) &
		UDMA_S2M_RD_DESC_PREF_CFG_2_PROMOTION_TH_MASK;

	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_2, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3);
	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;
	reg |= (conf->pref_thr << UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_SHIFT) &
		UDMA_S2M_RD_DESC_PREF_CFG_3_PREF_THR_MASK;

	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;
	reg |= conf->min_burst_below_thr &
		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_BELOW_THR_MASK;

	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;
	reg |= (conf->min_burst_above_thr <<
	       UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_SHIFT) &
		UDMA_S2M_RD_DESC_PREF_CFG_3_MIN_BURST_ABOVE_THR_MASK;

	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_3, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4);
	reg &= ~UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
	reg |= conf->a_full_thr & UDMA_S2M_RD_DESC_PREF_CFG_4_A_FULL_THR_MASK;
	al_reg_write32(&udma->udma_regs->s2m.s2m_rd.desc_pref_cfg_4, reg);

	return 0;
}
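
/*
 * Usage sketch (illustrative; all values are placeholders): S2M descriptor
 * prefetch with queue promotion and prefetch prediction disabled. The field
 * names are those read by the function above.
 *
 * @code
 *	struct al_udma_s2m_desc_pref_conf s2m_pref = {
 *		.desc_fifo_depth = 128,
 *		.sch_mode = SRR,
 *		.q_promotion = AL_FALSE,
 *		.force_promotion = AL_FALSE,
 *		.en_pref_prediction = AL_FALSE,
 *		.promotion_th = 8,
 *		.pref_thr = 8,
 *		.min_burst_below_thr = 4,
 *		.min_burst_above_thr = 4,
 *		.a_full_thr = 8,
 *	};
 *	al_udma_s2m_pref_set(udma, &s2m_pref);
 * @endcode
 */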

/* Configure S2M UDMA data write */
int al_udma_s2m_data_write_set(struct al_udma *udma,
				struct al_udma_s2m_data_write_conf *conf)
{
	uint32_t reg;

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1);
	reg &= ~UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
	reg |= conf->data_fifo_depth &
			UDMA_S2M_WR_DATA_CFG_1_DATA_FIFO_DEPTH_MASK;
	reg &= ~UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
	reg |= (conf->max_pkt_limit <<
				UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_SHIFT) &
			UDMA_S2M_WR_DATA_CFG_1_MAX_PKT_LIMIT_MASK;
	reg &= ~UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
	reg |= (conf->fifo_margin <<
				UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_SHIFT) &
			UDMA_S2M_WR_DATA_CFG_1_FIFO_MARGIN_MASK;
	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_1, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);
	reg &= ~UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
	reg |= conf->desc_wait_timer &
			UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK;
	reg &= ~(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
		 UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
		 UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
		 UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
		 UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
	reg |= conf->flags &
		(UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC |
		 UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC |
		 UDMA_S2M_WR_DATA_CFG_2_WAIT_FOR_PREF |
		 UDMA_S2M_WR_DATA_CFG_2_FULL_LINE_MODE |
		 UDMA_S2M_WR_DATA_CFG_2_DIRECT_HDR_USE_BUF1);
	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);

	return 0;
}

/* Configure S2M UDMA completion */
int al_udma_s2m_completion_set(struct al_udma *udma,
				struct al_udma_s2m_completion_conf *conf)
{
	uint32_t reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_1c);
	reg &= ~UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
	reg |= conf->desc_size & UDMA_S2M_COMP_CFG_1C_DESC_SIZE_MASK;
	if (conf->cnt_words == AL_TRUE)
		reg |= UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
	else
		reg &= ~UDMA_S2M_COMP_CFG_1C_CNT_WORDS;
	if (conf->q_promotion == AL_TRUE)
		reg |= UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
	else
		reg &= ~UDMA_S2M_COMP_CFG_1C_Q_PROMOTION;
	if (conf->force_rr == AL_TRUE)
		reg |= UDMA_S2M_COMP_CFG_1C_FORCE_RR;
	else
		reg &= ~UDMA_S2M_COMP_CFG_1C_FORCE_RR;
	reg &= ~UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
	reg |= (conf->q_free_min << UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_SHIFT) &
		UDMA_S2M_COMP_CFG_1C_Q_FREE_MIN_MASK;
	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_1c, reg);

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_comp.cfg_2c);
	reg &= ~UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
	reg |= conf->comp_fifo_depth
				& UDMA_S2M_COMP_CFG_2C_COMP_FIFO_DEPTH_MASK;
	reg &= ~UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
	reg |= (conf->unack_fifo_depth
			<< UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_SHIFT) &
			UDMA_S2M_COMP_CFG_2C_UNACK_FIFO_DEPTH_MASK;
	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_2c, reg);

	al_reg_write32(&udma->udma_regs->s2m.s2m_comp.cfg_application_ack,
		       conf->timeout);
	return 0;
}
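
/*
 * Usage sketch (illustrative; all values are placeholders, consult the
 * register documentation for the units of desc_size and timeout): program
 * the S2M completion engine with round-robin arbitration forced and the
 * application ack timer disabled.
 *
 * @code
 *	struct al_udma_s2m_completion_conf comp_conf = {
 *		.desc_size = 4,
 *		.cnt_words = AL_FALSE,
 *		.q_promotion = AL_FALSE,
 *		.force_rr = AL_TRUE,
 *		.q_free_min = 1,
 *		.comp_fifo_depth = 8,
 *		.unack_fifo_depth = 8,
 *		.timeout = 0,		// 0 disables the ack timeout
 *	};
 *	al_udma_s2m_completion_set(udma, &comp_conf);
 * @endcode
 */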

/** Configure the M2S UDMA scheduling mode */
int al_udma_m2s_sc_set(struct al_udma *udma,
					struct al_udma_m2s_dwrr_conf *sched)
{
	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched);

	if (sched->enable_dwrr == AL_TRUE)
		reg |= UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;
	else
		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_EN_DWRR;

	if (sched->pkt_mode == AL_TRUE)
		reg |= UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;
	else
		reg &= ~UDMA_M2S_DWRR_CFG_SCHED_PKT_MODE_EN;

	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_MASK;
	reg |= sched->weight << UDMA_M2S_DWRR_CFG_SCHED_WEIGHT_INC_SHIFT;
	reg &= ~UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_MASK;
	reg |= sched->inc_factor << UDMA_M2S_DWRR_CFG_SCHED_INC_FACTOR_SHIFT;
	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.cfg_sched, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt);
	reg &= ~UDMA_M2S_DWRR_CTRL_DEFICIT_CNT_INIT_MASK;
	reg |= sched->deficit_init_val;
	al_reg_write32(&udma->udma_regs->m2s.m2s_dwrr.ctrl_deficit_cnt, reg);

	return 0;
}
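
/*
 * Usage sketch (illustrative; placeholder values): enable deficit
 * weighted-round-robin scheduling between the M2S queues, accounting in
 * bytes rather than packets.
 *
 * @code
 *	struct al_udma_m2s_dwrr_conf dwrr = {
 *		.enable_dwrr = AL_TRUE,
 *		.pkt_mode = AL_FALSE,	// account bytes, not packets
 *		.weight = 1,
 *		.inc_factor = 1,
 *		.deficit_init_val = 0,
 *	};
 *	al_udma_m2s_sc_set(udma, &dwrr);
 * @endcode
 */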

/** Configure the M2S UDMA rate limitation */
int al_udma_m2s_rlimit_set(struct al_udma *udma,
					struct al_udma_m2s_rlimit_mode *mode)
{
	uint32_t reg = al_reg_read32(
				&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg);

	if (mode->pkt_mode_en == AL_TRUE)
		reg |= UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN;
	else
		reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_PKT_MODE_EN;
	reg &= ~UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK;
	reg |= mode->short_cycle_sz &
	    UDMA_M2S_RATE_LIMITER_GEN_CFG_SHORT_CYCLE_SIZE_MASK;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.gen_cfg, reg);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token);
	reg &= ~UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
	reg |= mode->token_init_val &
			UDMA_M2S_RATE_LIMITER_CTRL_TOKEN_RST_MASK;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_token, reg);

	return 0;
}

int al_udma_m2s_rlimit_reset(struct al_udma *udma)
{
	uint32_t reg = al_reg_read32(
			&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt);
	reg |= UDMA_M2S_RATE_LIMITER_CTRL_CYCLE_CNT_RST;
	al_reg_write32(&udma->udma_regs->m2s.m2s_rate_limiter.ctrl_cycle_cnt,
						reg);
	return 0;
}

/** Configure the Stream/Q rate limitation */
static int al_udma_common_rlimit_set(struct udma_rlimit_common *regs,
					  struct al_udma_m2s_rlimit_cfg *conf)
{
	uint32_t reg = al_reg_read32(&regs->cfg_1s);
	/* mask max burst size, and enable/pause control bits */
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN;
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE;
	reg |= conf->max_burst_sz &
		UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_MAX_BURST_SIZE_MASK;
	al_reg_write32(&regs->cfg_1s, reg);

	reg = al_reg_read32(&regs->cfg_cycle);
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
	reg |= conf->long_cycle_sz &
		UDMA_M2S_STREAM_RATE_LIMITER_CFG_CYCLE_LONG_CYCLE_SIZE_MASK;
	al_reg_write32(&regs->cfg_cycle, reg);

	reg = al_reg_read32(&regs->cfg_token_size_1);
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
	reg |= conf->long_cycle &
		UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_1_LONG_CYCLE_MASK;
	al_reg_write32(&regs->cfg_token_size_1, reg);

	reg = al_reg_read32(&regs->cfg_token_size_2);
	reg &= ~UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
	reg |= conf->short_cycle &
		UDMA_M2S_STREAM_RATE_LIMITER_CFG_TOKEN_SIZE_2_SHORT_CYCLE_MASK;
	al_reg_write32(&regs->cfg_token_size_2, reg);

	reg = al_reg_read32(&regs->mask);
	reg &= ~0xf;		/* only bits 0-3 defined */
	reg |= conf->mask & 0xf;
	al_reg_write32(&regs->mask, reg);

	return 0;
}

static int al_udma_common_rlimit_act(struct udma_rlimit_common *regs,
					  enum al_udma_m2s_rlimit_action act)
{
	uint32_t reg;

	switch (act) {
	case AL_UDMA_STRM_RLIMIT_ENABLE:
		reg = al_reg_read32(&regs->cfg_1s);
		reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_EN;
		al_reg_write32(&regs->cfg_1s, reg);
		break;
	case AL_UDMA_STRM_RLIMIT_PAUSE:
		reg = al_reg_read32(&regs->cfg_1s);
		reg |= UDMA_M2S_STREAM_RATE_LIMITER_CFG_1S_PAUSE;
		al_reg_write32(&regs->cfg_1s, reg);
		break;
	case AL_UDMA_STRM_RLIMIT_RESET:
		reg = al_reg_read32(&regs->sw_ctrl);
		reg |= UDMA_M2S_STREAM_RATE_LIMITER_SW_CTRL_RST_TOKEN_CNT;
		al_reg_write32(&regs->sw_ctrl, reg);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/** Configure the M2S Stream rate limitation */
int al_udma_m2s_strm_rlimit_set(struct al_udma *udma,
				struct al_udma_m2s_rlimit_cfg *conf)
{
	struct udma_rlimit_common *rlimit_regs =
	    &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit;

	return al_udma_common_rlimit_set(rlimit_regs, conf);
}

int al_udma_m2s_strm_rlimit_act(struct al_udma *udma,
				enum al_udma_m2s_rlimit_action act)
{
	struct udma_rlimit_common *rlimit_regs =
	    &udma->udma_regs->m2s.m2s_stream_rate_limiter.rlimit;

	if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) {
		al_err("udma [%s]: udma stream rate limit invalid action "
			"(%d)\n", udma->name, act);
		return -EINVAL;
	}
	return 0;
}
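
/*
 * Usage sketch (illustrative; all values are placeholders): configure the
 * M2S stream rate limiter, then enable it with the action helper. Only bits
 * 0-3 of 'mask' are defined by the hardware.
 *
 * @code
 *	struct al_udma_m2s_rlimit_cfg rl = {
 *		.max_burst_sz = 32,
 *		.long_cycle_sz = 64,
 *		.long_cycle = 1,	// tokens added per long cycle
 *		.short_cycle = 1,	// tokens added per short cycle
 *		.mask = 0,
 *	};
 *
 *	al_udma_m2s_strm_rlimit_set(udma, &rl);
 *	al_udma_m2s_strm_rlimit_act(udma, AL_UDMA_STRM_RLIMIT_ENABLE);
 * @endcode
 */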

/** Configure the M2S UDMA Q rate limitation */
int al_udma_m2s_q_rlimit_set(struct al_udma_q *udma_q,
				struct al_udma_m2s_rlimit_cfg *conf)
{
	struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit;

	return al_udma_common_rlimit_set(rlimit_regs, conf);
}

int al_udma_m2s_q_rlimit_act(struct al_udma_q *udma_q,
				enum al_udma_m2s_rlimit_action act)
{
	struct udma_rlimit_common *rlimit_regs = &udma_q->q_regs->m2s_q.rlimit;

	if (al_udma_common_rlimit_act(rlimit_regs, act) == -EINVAL) {
		al_err("udma [%s %d]: udma queue rate limit invalid action "
				"(%d)\n",
				udma_q->udma->name, udma_q->qid, act);
		return -EINVAL;
	}
	return 0;
}

/** Configure the M2S UDMA Q scheduling mode */
int al_udma_m2s_q_sc_set(struct al_udma_q *udma_q,
					struct al_udma_m2s_q_dwrr_conf *conf)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);

	reg &= ~UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
	reg |= conf->max_deficit_cnt_sz &
		UDMA_M2S_Q_DWRR_CFG_1_MAX_DEFICIT_CNT_SIZE_MASK;
	if (conf->strict == AL_TRUE)
		reg |= UDMA_M2S_Q_DWRR_CFG_1_STRICT;
	else
		reg &= ~UDMA_M2S_Q_DWRR_CFG_1_STRICT;
	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg);

	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_2);
	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
	reg |= (conf->axi_qos << UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_SHIFT) &
	    UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
	reg &= ~UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
	reg |= conf->q_qos & UDMA_M2S_Q_DWRR_CFG_2_Q_QOS_MASK;
	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_2, reg);

	reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_3);
	reg &= ~UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
	reg |= conf->weight & UDMA_M2S_Q_DWRR_CFG_3_WEIGHT_MASK;
	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_3, reg);

	return 0;
}

int al_udma_m2s_q_sc_pause(struct al_udma_q *udma_q, al_bool set)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_cfg_1);

	if (set == AL_TRUE)
		reg |= UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
	else
		reg &= ~UDMA_M2S_Q_DWRR_CFG_1_PAUSE;
	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_cfg_1, reg);

	return 0;
}

int al_udma_m2s_q_sc_reset(struct al_udma_q *udma_q)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl);

	reg |= UDMA_M2S_Q_DWRR_SW_CTRL_RST_CNT;
	al_reg_write32(&udma_q->q_regs->m2s_q.dwrr_sw_ctrl, reg);

	return 0;
}

/** M2S UDMA completion and application timeouts */
int al_udma_m2s_comp_timeouts_set(struct al_udma *udma,
				struct al_udma_m2s_comp_timeouts *conf)
{
	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);

	if (conf->sch_mode == SRR)
		reg |= UDMA_M2S_COMP_CFG_1C_FORCE_RR;
	else if (conf->sch_mode == STRICT)
		reg &= ~UDMA_M2S_COMP_CFG_1C_FORCE_RR;
	else {
		al_err("udma [%s]: requested completion descriptor prefetch "
				"arbiter mode (%d) is invalid\n",
				udma->name, conf->sch_mode);
		return -EINVAL;
	}
	if (conf->enable_q_promotion == AL_TRUE)
		reg |= UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
	else
		reg &= ~UDMA_M2S_COMP_CFG_1C_Q_PROMOTION;
	reg &= ~UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK;
	reg |=
	    conf->comp_fifo_depth << UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT;

	reg &= ~UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK;
	reg |= conf->unack_fifo_depth
				<< UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT;
	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_1c, reg);

	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_coal,
						conf->coal_timeout);

	reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);
	reg &= ~UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK;
	reg |= conf->app_timeout << UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT;
	al_reg_write32(&udma->udma_regs->m2s.m2s_comp.cfg_application_ack, reg);
	return 0;
}
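
/*
 * Usage sketch (illustrative): the getter below retrieves every field the
 * setter programs, so a safe read-modify-write is possible, e.g. disabling
 * completion coalescing while preserving the other timeouts.
 *
 * @code
 *	struct al_udma_m2s_comp_timeouts tmo;
 *
 *	al_udma_m2s_comp_timeouts_get(udma, &tmo);
 *	tmo.coal_timeout = 0;
 *	al_udma_m2s_comp_timeouts_set(udma, &tmo);
 * @endcode
 */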

int al_udma_m2s_comp_timeouts_get(struct al_udma *udma,
					struct al_udma_m2s_comp_timeouts *conf)
{
	uint32_t reg = al_reg_read32(&udma->udma_regs->m2s.m2s_comp.cfg_1c);

	if (reg & UDMA_M2S_COMP_CFG_1C_FORCE_RR)
		conf->sch_mode = SRR;
	else
		conf->sch_mode = STRICT;

	if (reg & UDMA_M2S_COMP_CFG_1C_Q_PROMOTION)
		conf->enable_q_promotion = AL_TRUE;
	else
		conf->enable_q_promotion = AL_FALSE;

	conf->comp_fifo_depth =
	    AL_REG_FIELD_GET(reg,
			     UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_MASK,
			     UDMA_M2S_COMP_CFG_1C_COMP_FIFO_DEPTH_SHIFT);
	conf->unack_fifo_depth =
	    AL_REG_FIELD_GET(reg,
			     UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_MASK,
			     UDMA_M2S_COMP_CFG_1C_UNACK_FIFO_DEPTH_SHIFT);

	conf->coal_timeout = al_reg_read32(
				&udma->udma_regs->m2s.m2s_comp.cfg_coal);

	reg = al_reg_read32(
			&udma->udma_regs->m2s.m2s_comp.cfg_application_ack);

	conf->app_timeout =
	    AL_REG_FIELD_GET(reg,
			     UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_MASK,
			     UDMA_M2S_COMP_CFG_APPLICATION_ACK_TOUT_SHIFT);

	return 0;
}

/**
 * S2M UDMA configure no descriptors behaviour
 */
int al_udma_s2m_no_desc_cfg_set(struct al_udma *udma, al_bool drop_packet,
		al_bool gen_interrupt, uint32_t wait_for_desc_timeout)
{
	uint32_t reg;

	reg = al_reg_read32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2);

	if ((drop_packet == AL_TRUE) && (wait_for_desc_timeout == 0)) {
		al_err("udma [%s]: setting timeout to 0 will cause the udma "
			"to wait forever instead of dropping the packet\n",
			udma->name);
		return -EINVAL;
	}

	if (drop_packet == AL_TRUE)
		reg |= UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;
	else
		reg &= ~UDMA_S2M_WR_DATA_CFG_2_DROP_IF_NO_DESC;

	if (gen_interrupt == AL_TRUE)
		reg |= UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;
	else
		reg &= ~UDMA_S2M_WR_DATA_CFG_2_HINT_IF_NO_DESC;

	AL_REG_FIELD_SET(reg, UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_MASK,
		UDMA_S2M_WR_DATA_CFG_2_DESC_WAIT_TIMER_SHIFT,
		wait_for_desc_timeout);

	al_reg_write32(&udma->udma_regs->s2m.s2m_wr.data_cfg_2, reg);

	return 0;
}
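
/*
 * Usage sketch (illustrative; the timeout value is a placeholder): drop
 * incoming packets, and raise the no-descriptor hint interrupt, once no
 * descriptor becomes available within the configured wait time.
 *
 * @code
 *	al_udma_s2m_no_desc_cfg_set(udma, AL_TRUE, AL_TRUE, 1000);
 * @endcode
 */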

/* S2M UDMA configure a queue's completion update */
int al_udma_s2m_q_compl_updade_config(struct al_udma_q *udma_q, al_bool enable)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);

	if (enable == AL_TRUE)
		reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
	else
		reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;

	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);

	return 0;
}

/* S2M UDMA configure a queue's completion descriptors coalescing */
int al_udma_s2m_q_compl_coal_config(struct al_udma_q *udma_q, al_bool enable,
		uint32_t coal_timeout)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);

	if (enable == AL_TRUE)
		reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
	else
		reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;

	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);

	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, coal_timeout);
	return 0;
}
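
/*
 * Usage sketch (illustrative; the timeout value is a placeholder): enable
 * completion-descriptor coalescing on a queue. Note the register bit is a
 * disable bit, so enabling clears it.
 *
 * @code
 *	al_udma_s2m_q_compl_coal_config(udma_q, AL_TRUE, 64);
 * @endcode
 */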

/* S2M UDMA configure completion descriptors write burst parameters */
int al_udma_s2m_compl_desc_burst_config(struct al_udma *udma,
		uint16_t burst_size)
{
	if ((burst_size != 64) && (burst_size != 128) && (burst_size != 256)) {
		al_err("%s: invalid burst_size value (%d)\n", __func__,
				burst_size);
		return -EINVAL;
	}

	/* convert burst size from bytes to 16-byte beats */
	burst_size = burst_size / 16;
	al_reg_write32_masked(&udma->udma_regs->s2m.axi_s2m.desc_wr_cfg_1,
			UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_MASK |
			UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_MASK,
			burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MIN_AXI_BEATS_SHIFT |
			burst_size << UDMA_AXI_S2M_DESC_WR_CFG_1_MAX_AXI_BEATS_SHIFT);
	return 0;
}
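
/*
 * Usage sketch (illustrative): 256 bytes is 16 beats of 16 bytes each,
 * programmed as both the minimum and the maximum AXI burst for completion
 * descriptor writes. Only 64, 128 and 256 are accepted.
 *
 * @code
 *	al_udma_s2m_compl_desc_burst_config(udma, 256);
 * @endcode
 */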

/* S2M UDMA configure a queue's completion descriptors header split */
int al_udma_s2m_q_compl_hdr_split_config(struct al_udma_q *udma_q, al_bool enable,
					 al_bool force_hdr_split, uint32_t hdr_len)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg);

	reg &= ~UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
	reg &= ~UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
	reg &= ~UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;

	if (enable == AL_TRUE) {
		reg |= hdr_len & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
		reg |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;

		if (force_hdr_split == AL_TRUE)
			reg |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
	}

	al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, reg);

	return 0;
}

/* S2M UDMA per queue completion configuration */
int al_udma_s2m_q_comp_set(struct al_udma_q *udma_q,
					struct al_udma_s2m_q_comp_conf *conf)
{
	uint32_t reg = al_reg_read32(&udma_q->q_regs->s2m_q.comp_cfg);
	if (conf->en_comp_ring_update == AL_TRUE)
		reg |= UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;
	else
		reg &= ~UDMA_S2M_Q_COMP_CFG_EN_COMP_RING_UPDATE;

	if (conf->dis_comp_coal == AL_TRUE)
		reg |= UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;
	else
		reg &= ~UDMA_S2M_Q_COMP_CFG_DIS_COMP_COAL;

	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg, reg);

	al_reg_write32(&udma_q->q_regs->s2m_q.comp_cfg_2, conf->comp_timer);

	reg = al_reg_read32(&udma_q->q_regs->s2m_q.pkt_cfg);

	reg &= ~UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
	reg |= conf->hdr_split_size & UDMA_S2M_Q_PKT_CFG_HDR_SPLIT_SIZE_MASK;
	if (conf->force_hdr_split == AL_TRUE)
		reg |= UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
	else
		reg &= ~UDMA_S2M_Q_PKT_CFG_FORCE_HDR_SPLIT;
	if (conf->en_hdr_split == AL_TRUE)
		reg |= UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;
	else
		reg &= ~UDMA_S2M_Q_PKT_CFG_EN_HDR_SPLIT;

	al_reg_write32(&udma_q->q_regs->s2m_q.pkt_cfg, reg);

	reg = al_reg_read32(&udma_q->q_regs->s2m_q.qos_cfg);
	reg &= ~UDMA_S2M_QOS_CFG_Q_QOS_MASK;
	reg |= conf->q_qos & UDMA_S2M_QOS_CFG_Q_QOS_MASK;
	al_reg_write32(&udma_q->q_regs->s2m_q.qos_cfg, reg);

	return 0;
}

/* UDMA Target-ID control configuration per queue */
void al_udma_gen_tgtid_conf_queue_set(
	struct unit_regs		*unit_regs,
	struct al_udma_gen_tgtid_conf	*conf,
	uint32_t qid)
{
	uint32_t *tx_tgtid_reg, *rx_tgtid_reg, *tx_tgtaddr_reg, *rx_tgtaddr_reg;
	unsigned int rev_id;

	al_assert(qid < DMA_MAX_Q);
	rev_id = al_udma_get_revision(unit_regs);

	/* Target-ID TX DESC EN */
	al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
			(conf->tx_q_conf[qid].desc_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_SHIFT,
			(conf->tx_q_conf[qid].desc_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_DESC_EN_SHIFT);

	/* Target-ID TX QUEUE EN */
	al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
			(conf->tx_q_conf[qid].queue_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT,
			(conf->tx_q_conf[qid].queue_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_TX_Q_TGTID_QUEUE_EN_SHIFT);

	/* Target-ID RX DESC EN */
	al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
			(conf->rx_q_conf[qid].desc_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT,
			(conf->rx_q_conf[qid].desc_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_DESC_EN_SHIFT);

	/* Target-ID RX QUEUE EN */
	al_reg_write32_masked(&unit_regs->gen.tgtid.cfg_tgtid_0,
			(conf->rx_q_conf[qid].queue_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_SHIFT,
			(conf->rx_q_conf[qid].queue_en << qid) <<
			UDMA_GEN_TGTID_CFG_TGTID_0_RX_Q_TGTID_QUEUE_EN_SHIFT);

	switch (qid) {
	case 0:
	case 1:
		tx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_1;
		rx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_3;
		tx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_0;
		rx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_2;
		break;
	case 2:
	case 3:
		tx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_2;
		rx_tgtid_reg = &unit_regs->gen.tgtid.cfg_tgtid_4;
		tx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_1;
		rx_tgtaddr_reg = &unit_regs->gen.tgtaddr.cfg_tgtaddr_3;
		break;
	default:
		al_assert(AL_FALSE);
		return;
	}

	al_reg_write32_masked(tx_tgtid_reg,
		UDMA_GEN_TGTID_CFG_TGTID_MASK(qid),
		conf->tx_q_conf[qid].tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid));

	al_reg_write32_masked(rx_tgtid_reg,
		UDMA_GEN_TGTID_CFG_TGTID_MASK(qid),
		conf->rx_q_conf[qid].tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid));

	if (rev_id >= AL_UDMA_REV_ID_REV2) {
		al_reg_write32_masked(tx_tgtaddr_reg,
			UDMA_GEN_TGTADDR_CFG_MASK(qid),
			conf->tx_q_conf[qid].tgtaddr << UDMA_GEN_TGTADDR_CFG_SHIFT(qid));

		al_reg_write32_masked(rx_tgtaddr_reg,
			UDMA_GEN_TGTADDR_CFG_MASK(qid),
			conf->rx_q_conf[qid].tgtaddr << UDMA_GEN_TGTADDR_CFG_SHIFT(qid));
	}
}

/* UDMA Target-ID control configuration */
void al_udma_gen_tgtid_conf_set(
	struct unit_regs		*unit_regs,
	struct al_udma_gen_tgtid_conf	*conf)
{
	int i;

	for (i = 0; i < DMA_MAX_Q; i++)
		al_udma_gen_tgtid_conf_queue_set(unit_regs, conf, i);
}

/* UDMA Target-ID MSIX control configuration */
void al_udma_gen_tgtid_msix_conf_set(
	struct unit_regs			*unit_regs,
	struct al_udma_gen_tgtid_msix_conf	*conf)
{
	al_reg_write32_masked(
		&unit_regs->gen.tgtid.cfg_tgtid_0,
		UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_ACCESS_EN |
		UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_SEL,
		(conf->access_en ? UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_ACCESS_EN : 0) |
		(conf->sel ? UDMA_GEN_TGTID_CFG_TGTID_0_MSIX_TGTID_SEL : 0));
}
1219