/*
 * Copyright 2008-2012 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/math64.h>
#include "fsl_fman.h"
#include "dpaa_integration_ext.h"

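/*
 * The fman_get_*_event() helpers below share one pattern: read the event
 * register, mask it against the enabled interrupts, clear any force bits
 * that contributed to the event, then acknowledge the events by writing
 * them back and return them to the caller.
 */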
uint32_t fman_get_bmi_err_event(struct fman_bmi_regs *bmi_rg)
{
	uint32_t	event, mask, force;

	event = ioread32be(&bmi_rg->fmbm_ievr);
	mask = ioread32be(&bmi_rg->fmbm_ier);
	event &= mask;
	/* clear the forced events */
	force = ioread32be(&bmi_rg->fmbm_ifr);
	if (force & event)
		iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
	/* clear the acknowledged events */
	iowrite32be(event, &bmi_rg->fmbm_ievr);
	return event;
}

uint32_t fman_get_qmi_err_event(struct fman_qmi_regs *qmi_rg)
{
	uint32_t	event, mask, force;

	event = ioread32be(&qmi_rg->fmqm_eie);
	mask = ioread32be(&qmi_rg->fmqm_eien);
	event &= mask;

	/* clear the forced events */
	force = ioread32be(&qmi_rg->fmqm_eif);
	if (force & event)
		iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
	/* clear the acknowledged events */
	iowrite32be(event, &qmi_rg->fmqm_eie);
	return event;
}

uint32_t fman_get_dma_com_id(struct fman_dma_regs *dma_rg)
{
	return ioread32be(&dma_rg->fmdmtcid);
}

uint64_t fman_get_dma_addr(struct fman_dma_regs *dma_rg)
{
	uint64_t addr;

	addr = (uint64_t)ioread32be(&dma_rg->fmdmtal);
	addr |= ((uint64_t)(ioread32be(&dma_rg->fmdmtah)) << 32);

	return addr;
}

uint32_t fman_get_dma_err_event(struct fman_dma_regs *dma_rg)
{
	uint32_t status, mask;

	status = ioread32be(&dma_rg->fmdmsr);
	mask = ioread32be(&dma_rg->fmdmmr);

	/* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
	if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
		status &= ~DMA_STATUS_BUS_ERR;

	/* clear relevant bits if mask has no DMA_MODE_ECC */
	if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
		status &= ~(DMA_STATUS_FM_SPDAT_ECC |
				DMA_STATUS_READ_ECC |
				DMA_STATUS_SYSTEM_WRITE_ECC |
				DMA_STATUS_FM_WRITE_ECC);

	/* clear set events */
	iowrite32be(status, &dma_rg->fmdmsr);

	return status;
}

uint32_t fman_get_fpm_err_event(struct fman_fpm_regs *fpm_rg)
{
	uint32_t	event;

	event = ioread32be(&fpm_rg->fmfp_ee);
	/* clear all occurred events */
	iowrite32be(event, &fpm_rg->fmfp_ee);
	return event;
}

uint32_t fman_get_muram_err_event(struct fman_fpm_regs *fpm_rg)
{
	uint32_t	event, mask;

	event = ioread32be(&fpm_rg->fm_rcr);
	mask = ioread32be(&fpm_rg->fm_rie);

	/* clear MURAM event bit (do not clear IRAM event) */
	iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);

	if ((mask & FPM_MURAM_ECC_ERR_EX_EN))
		return event;
	else
		return 0;
}

uint32_t fman_get_iram_err_event(struct fman_fpm_regs *fpm_rg)
{
	uint32_t	event, mask;

	event = ioread32be(&fpm_rg->fm_rcr);
	mask = ioread32be(&fpm_rg->fm_rie);
	/* clear IRAM event bit (do not clear MURAM event) */
	iowrite32be(event & ~FPM_RAM_MURAM_ECC,
			&fpm_rg->fm_rcr);

	if ((mask & FPM_IRAM_ECC_ERR_EX_EN))
		return event;
	else
		return 0;
}

uint32_t fman_get_qmi_event(struct fman_qmi_regs *qmi_rg)
{
	uint32_t	event, mask, force;

	event = ioread32be(&qmi_rg->fmqm_ie);
	mask = ioread32be(&qmi_rg->fmqm_ien);
	event &= mask;
	/* clear the forced events */
	force = ioread32be(&qmi_rg->fmqm_if);
	if (force & event)
		iowrite32be(force & ~event, &qmi_rg->fmqm_if);
	/* clear the acknowledged events */
	iowrite32be(event, &qmi_rg->fmqm_ie);
	return event;
}

void fman_enable_time_stamp(struct fman_fpm_regs *fpm_rg,
				uint8_t count1ubit,
				uint16_t fm_clk_freq)
{
	uint32_t tmp;
	uint64_t frac;
	uint32_t intgr;
	uint32_t ts_freq = (uint32_t)(1 << count1ubit); /* in MHz */

	/* configure the timestamp so that bit 8 counts 1 microsecond.
	 * Find the effective count rate at the TIMESTAMP least significant
	 * bits:
	 * Effective_Count_Rate = 1 MHz x 2^8 = 256 MHz
	 * Find the frequency ratio between the effective count rate and
	 * the clock, Effective_Count_Rate / CLK; e.g. for a 600 MHz clock:
	 * 256/600 = 0.4266666... */
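	/* Worked example (assuming count1ubit = 8 and fm_clk_freq = 600):
	 * intgr = 256 / 600 = 0 and frac = ceil((256 << 16) / 600) = 27963,
	 * i.e. the register holds the ratio 27963 / 2^16 ~= 0.42667. */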

	intgr = ts_freq / fm_clk_freq;
	/* we multiply by 2^16 to keep the fraction of the division;
	 * we do not divide back, since we write this value as a fraction -
	 * see spec */

	frac = ((uint64_t)ts_freq << 16) - ((uint64_t)intgr << 16) * fm_clk_freq;
	/* we check the remainder of the division in order to round up if
	 * it is not an integer */
	if (do_div(frac, fm_clk_freq))
		frac++;

	tmp = (intgr << FPM_TS_INT_SHIFT) | (uint16_t)frac;
	iowrite32be(tmp, &fpm_rg->fmfp_tsc2);

	/* enable timestamp with original clock */
	iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
}

uint32_t fman_get_fpm_error_interrupts(struct fman_fpm_regs *fpm_rg)
{
	return ioread32be(&fpm_rg->fm_epi);
}

int fman_set_erratum_10gmac_a004_wa(struct fman_fpm_regs *fpm_rg)
{
	int timeout = 100;

	iowrite32be(0x40000000, &fpm_rg->fmfp_extc);

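	/* poll until the controller clears the bit; 100 iterations of
	 * DELAY(10) bound the wait at roughly 1 ms */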
	while ((ioread32be(&fpm_rg->fmfp_extc) & 0x40000000) && --timeout)
		DELAY(10);

	if (!timeout)
		return -EBUSY;
	return 0;
}

void fman_set_ctrl_intr(struct fman_fpm_regs *fpm_rg,
			uint8_t event_reg_id,
			uint32_t enable_events)
{
	iowrite32be(enable_events, &fpm_rg->fmfp_cee[event_reg_id]);
}

uint32_t fman_get_ctrl_intr(struct fman_fpm_regs *fpm_rg, uint8_t event_reg_id)
{
	return ioread32be(&fpm_rg->fmfp_cee[event_reg_id]);
}

void fman_set_num_of_riscs_per_port(struct fman_fpm_regs *fpm_rg,
					uint8_t port_id,
					uint8_t num_fman_ctrls,
					uint32_t or_fman_ctrl)
{
	uint32_t tmp = 0;

	tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
	/* TODO: maybe assign CTL# according to some other criterion */
	if (num_fman_ctrls == 2)
		tmp = FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
	/* order restoration */
	tmp |= (or_fman_ctrl << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | or_fman_ctrl;

	iowrite32be(tmp, &fpm_rg->fmfp_prc);
}

void fman_set_order_restoration_per_port(struct fman_fpm_regs *fpm_rg,
					uint8_t port_id,
					bool independent_mode,
					bool is_rx_port)
{
	uint32_t tmp = 0;

	tmp = (uint32_t)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
	if (independent_mode) {
		if (is_rx_port)
			tmp |= (FPM_PRT_FM_CTL1 <<
				FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL1;
		else
			tmp |= (FPM_PRT_FM_CTL2 <<
				FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PRT_FM_CTL2;
	} else {
		tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);

		/* order restoration */
		if (port_id % 2)
			tmp |= (FPM_PRT_FM_CTL1 <<
					FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
		else
			tmp |= (FPM_PRT_FM_CTL2 <<
					FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
	}
	iowrite32be(tmp, &fpm_rg->fmfp_prc);
}

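/*
 * As the accessors below imply, the QMI general configuration register
 * (fmqm_gc) packs the dequeue threshold into its low byte and the enqueue
 * threshold into the byte above it.
 */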
uint8_t fman_get_qmi_deq_th(struct fman_qmi_regs *qmi_rg)
{
	return (uint8_t)ioread32be(&qmi_rg->fmqm_gc);
}

uint8_t fman_get_qmi_enq_th(struct fman_qmi_regs *qmi_rg)
{
	return (uint8_t)(ioread32be(&qmi_rg->fmqm_gc) >> 8);
}

void fman_set_qmi_enq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
{
	uint32_t tmp_reg;

	tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
	tmp_reg &= ~QMI_CFG_ENQ_MASK;
	tmp_reg |= ((uint32_t)val << 8);
	iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
}

void fman_set_qmi_deq_th(struct fman_qmi_regs *qmi_rg, uint8_t val)
{
	uint32_t tmp_reg;

	tmp_reg = ioread32be(&qmi_rg->fmqm_gc);
	tmp_reg &= ~QMI_CFG_DEQ_MASK;
	tmp_reg |= (uint32_t)val;
	iowrite32be(tmp_reg, &qmi_rg->fmqm_gc);
}

void fman_qmi_disable_dispatch_limit(struct fman_fpm_regs *fpm_rg)
{
	iowrite32be(0, &fpm_rg->fmfp_mxd);
}

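/*
 * Each 32-bit fmdmplr register holds the LIODN base of two ports: as the
 * masking below implies, odd port IDs use the low half-word and even port
 * IDs use the half-word at DMA_LIODN_SHIFT.
 */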
void fman_set_liodn_per_port(struct fman_rg *fman_rg, uint8_t port_id,
				uint16_t liodn_base,
				uint16_t liodn_ofst)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return;

	/* set LIODN base for this port */
	tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
	if (port_id % 2) {
		tmp &= ~FM_LIODN_BASE_MASK;
		tmp |= (uint32_t)liodn_base;
	} else {
		tmp &= ~(FM_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
		tmp |= (uint32_t)liodn_base << DMA_LIODN_SHIFT;
	}
	iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
	iowrite32be((uint32_t)liodn_ofst,
			&fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
}

bool fman_is_port_stalled(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
{
	return (bool)!!(ioread32be(&fpm_rg->fmfp_ps[port_id]) & FPM_PS_STALLED);
}

void fman_resume_stalled_port(struct fman_fpm_regs *fpm_rg, uint8_t port_id)
{
	uint32_t	tmp;

	tmp = (uint32_t)((port_id << FPM_PORT_FM_CTL_PORTID_SHIFT) |
				FPM_PRC_REALSE_STALLED);
	iowrite32be(tmp, &fpm_rg->fmfp_prc);
}

int fman_reset_mac(struct fman_fpm_regs *fpm_rg, uint8_t mac_id, bool is_10g)
{
	uint32_t msk, timeout = 100;

	/* Get the relevant bit mask */
	if (is_10g) {
		switch (mac_id) {
		case(0):
			msk = FPM_RSTC_10G0_RESET;
			break;
		case(1):
			msk = FPM_RSTC_10G1_RESET;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (mac_id) {
		case(0):
			msk = FPM_RSTC_1G0_RESET;
			break;
		case(1):
			msk = FPM_RSTC_1G1_RESET;
			break;
		case(2):
			msk = FPM_RSTC_1G2_RESET;
			break;
		case(3):
			msk = FPM_RSTC_1G3_RESET;
			break;
		case(4):
			msk = FPM_RSTC_1G4_RESET;
			break;
		case(5):
			msk = FPM_RSTC_1G5_RESET;
			break;
		case(6):
			msk = FPM_RSTC_1G6_RESET;
			break;
		case(7):
			msk = FPM_RSTC_1G7_RESET;
			break;
		default:
			return -EINVAL;
		}
	}
	/* reset */
	iowrite32be(msk, &fpm_rg->fm_rstc);
	while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
		DELAY(10);

	if (!timeout)
		return -EBUSY;
	return 0;
}

uint16_t fman_get_size_of_fifo(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
{
	uint32_t tmp_reg;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id - 1]);
	return (uint16_t)((tmp_reg & BMI_FIFO_SIZE_MASK) + 1);
}

uint32_t fman_get_total_fifo_size(struct fman_bmi_regs *bmi_rg)
{
	uint32_t reg, res;

	reg = ioread32be(&bmi_rg->fmbm_cfg1);
	res = (reg >> BMI_CFG1_FIFO_SIZE_SHIFT) & 0x3ff;
	return res * FMAN_BMI_FIFO_UNITS;
}

uint16_t fman_get_size_of_extra_fifo(struct fman_bmi_regs *bmi_rg,
					uint8_t port_id)
{
	uint32_t tmp_reg;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp_reg = ioread32be(&bmi_rg->fmbm_pfs[port_id - 1]);
	return (uint16_t)((tmp_reg & BMI_EXTRA_FIFO_SIZE_MASK) >>
				BMI_EXTRA_FIFO_SIZE_SHIFT);
}

void fman_set_size_of_fifo(struct fman_bmi_regs *bmi_rg,
				uint8_t port_id,
				uint32_t sz_fifo,
				uint32_t extra_sz_fifo)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return;

	/* calculate reg */
	tmp = (uint32_t)((sz_fifo / FMAN_BMI_FIFO_UNITS - 1) |
		((extra_sz_fifo / FMAN_BMI_FIFO_UNITS) <<
				BMI_EXTRA_FIFO_SIZE_SHIFT));
	iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
}

uint8_t fman_get_num_of_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
	return (uint8_t)(((tmp & BMI_NUM_OF_TASKS_MASK) >>
				BMI_NUM_OF_TASKS_SHIFT) + 1);
}

uint8_t fman_get_num_extra_tasks(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
	return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_TASKS_MASK) >>
				BMI_EXTRA_NUM_OF_TASKS_SHIFT);
}

void fman_set_num_of_tasks(struct fman_bmi_regs *bmi_rg,
				uint8_t port_id,
				uint8_t num_tasks,
				uint8_t num_extra_tasks)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return;

	/* calculate reg */
	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
			~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
	tmp |= (uint32_t)(((num_tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
			(num_extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
}

uint8_t fman_get_num_of_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
	return (uint8_t)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
			BMI_NUM_OF_DMAS_SHIFT) + 1);
}

uint8_t fman_get_num_extra_dmas(struct fman_bmi_regs *bmi_rg, uint8_t port_id)
{
	uint32_t tmp;

	if ((port_id > 63) || (port_id < 1))
		return 0;

	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
	return (uint8_t)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
			BMI_EXTRA_NUM_OF_DMAS_SHIFT);
}

void fman_set_num_of_open_dmas(struct fman_bmi_regs *bmi_rg,
				uint8_t port_id,
				uint8_t num_open_dmas,
				uint8_t num_extra_open_dmas,
				uint8_t total_num_dmas)
{
	uint32_t tmp = 0;

	if ((port_id > 63) || (port_id < 1))
		return;

	/* calculate reg */
	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
			~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
	tmp |= (uint32_t)(((num_open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
			(num_extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);

	/* update total num of DMAs with committed number of open DMAs,
	 * and max uncommitted pool. */
	if (total_num_dmas) {
		tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
		tmp |= (uint32_t)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
		iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
	}
}

void fman_set_vsp_window(struct fman_bmi_regs *bmi_rg,
				uint8_t port_id,
				uint8_t base_storage_profile,
				uint8_t log2_num_of_profiles)
{
	uint32_t tmp = 0;

	if ((port_id > 63) || (port_id < 1))
		return;

	tmp = ioread32be(&bmi_rg->fmbm_spliodn[port_id - 1]);
	tmp |= (uint32_t)((uint32_t)base_storage_profile & 0x3f) << 16;
	tmp |= (uint32_t)log2_num_of_profiles << 28;
	iowrite32be(tmp, &bmi_rg->fmbm_spliodn[port_id - 1]);
}

void fman_set_congestion_group_pfc_priority(uint32_t *cpg_rg,
					uint32_t congestion_group_id,
					uint8_t priority_bit_map,
					uint32_t reg_num)
{
	uint32_t offset, tmp = 0;

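	/* each 32-bit CPG register carries the PFC priority bit maps of
	 * four congestion groups, one byte per group */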
	offset = (congestion_group_id % 4) * 8;

	tmp = ioread32be(&cpg_rg[reg_num]);
	tmp &= ~(0xFF << offset);
	tmp |= (uint32_t)priority_bit_map << offset;

	iowrite32be(tmp, &cpg_rg[reg_num]);
}

/*****************************************************************************/
/*                      API Init unit functions                              */
/*****************************************************************************/
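/*
 * Fill cfg with driver defaults. With is_master false only the
 * error-handling subset is set; a master instance additionally gets the
 * global DMA, dispatch-threshold and QMI defaults below.
 */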
void fman_defconfig(struct fman_cfg *cfg, bool is_master)
{
	memset(cfg, 0, sizeof(struct fman_cfg));

	cfg->catastrophic_err		= DEFAULT_CATASTROPHIC_ERR;
	cfg->dma_err			= DEFAULT_DMA_ERR;
	cfg->halt_on_external_activ	= DEFAULT_HALT_ON_EXTERNAL_ACTIVATION;
	cfg->halt_on_unrecov_ecc_err	= DEFAULT_HALT_ON_UNRECOVERABLE_ECC_ERROR;
	cfg->en_iram_test_mode		= FALSE;
	cfg->en_muram_test_mode		= FALSE;
	cfg->external_ecc_rams_enable	= DEFAULT_EXTERNAL_ECC_RAMS_ENABLE;

	if (!is_master)
		return;

	cfg->dma_aid_override		= DEFAULT_AID_OVERRIDE;
	cfg->dma_aid_mode		= DEFAULT_AID_MODE;
	cfg->dma_comm_qtsh_clr_emer	= DEFAULT_DMA_COMM_Q_LOW;
	cfg->dma_comm_qtsh_asrt_emer	= DEFAULT_DMA_COMM_Q_HIGH;
	cfg->dma_cache_override		= DEFAULT_CACHE_OVERRIDE;
	cfg->dma_cam_num_of_entries	= DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
	cfg->dma_dbg_cnt_mode		= DEFAULT_DMA_DBG_CNT_MODE;
	cfg->dma_en_emergency		= DEFAULT_DMA_EN_EMERGENCY;
	cfg->dma_sos_emergency		= DEFAULT_DMA_SOS_EMERGENCY;
	cfg->dma_watchdog		= DEFAULT_DMA_WATCHDOG;
	cfg->dma_en_emergency_smoother	= DEFAULT_DMA_EN_EMERGENCY_SMOOTHER;
	cfg->dma_emergency_switch_counter = DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
	cfg->disp_limit_tsh		= DEFAULT_DISP_LIMIT;
	cfg->prs_disp_tsh		= DEFAULT_PRS_DISP_TH;
	cfg->plcr_disp_tsh		= DEFAULT_PLCR_DISP_TH;
	cfg->kg_disp_tsh		= DEFAULT_KG_DISP_TH;
	cfg->bmi_disp_tsh		= DEFAULT_BMI_DISP_TH;
	cfg->qmi_enq_disp_tsh		= DEFAULT_QMI_ENQ_DISP_TH;
	cfg->qmi_deq_disp_tsh		= DEFAULT_QMI_DEQ_DISP_TH;
	cfg->fm_ctl1_disp_tsh		= DEFAULT_FM_CTL1_DISP_TH;
	cfg->fm_ctl2_disp_tsh		= DEFAULT_FM_CTL2_DISP_TH;

	cfg->pedantic_dma		= FALSE;
	cfg->tnum_aging_period		= DEFAULT_TNUM_AGING_PERIOD;
	cfg->dma_stop_on_bus_error	= FALSE;
	cfg->qmi_deq_option_support	= FALSE;
}

void fman_regconfig(struct fman_rg *fman_rg, struct fman_cfg *cfg)
{
	uint32_t tmp_reg;

	/* read the values from the registers as they are initialized by the
	 * hardware with the required values */
	tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg1);
	cfg->total_fifo_size =
		(((tmp_reg & BMI_TOTAL_FIFO_SIZE_MASK) >>
			BMI_CFG1_FIFO_SIZE_SHIFT) + 1) * FMAN_BMI_FIFO_UNITS;

	tmp_reg = ioread32be(&fman_rg->bmi_rg->fmbm_cfg2);
	cfg->total_num_of_tasks =
		(uint8_t)(((tmp_reg & BMI_TOTAL_NUM_OF_TASKS_MASK) >>
			BMI_CFG2_TASKS_SHIFT) + 1);

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmtr);
	cfg->dma_comm_qtsh_asrt_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmhy);
	cfg->dma_comm_qtsh_clr_emer = (uint8_t)(tmp_reg >> DMA_THRESH_COMMQ_SHIFT);

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmmr);
	cfg->dma_cache_override = (enum fman_dma_cache_override)
		((tmp_reg & DMA_MODE_CACHE_OR_MASK) >> DMA_MODE_CACHE_OR_SHIFT);
	cfg->dma_cam_num_of_entries = (uint8_t)
		((((tmp_reg & DMA_MODE_CEN_MASK) >> DMA_MODE_CEN_SHIFT) + 1) *
			DMA_CAM_UNITS);
	cfg->dma_aid_override = (bool)((tmp_reg & DMA_MODE_AID_OR) ? TRUE : FALSE);
	cfg->dma_dbg_cnt_mode = (enum fman_dma_dbg_cnt_mode)
		((tmp_reg & DMA_MODE_DBG_MASK) >> DMA_MODE_DBG_SHIFT);
	cfg->dma_en_emergency = (bool)((tmp_reg & DMA_MODE_EB) ? TRUE : FALSE);

	tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_mxd);
	cfg->disp_limit_tsh = (uint8_t)
		((tmp_reg & FPM_DISP_LIMIT_MASK) >> FPM_DISP_LIMIT_SHIFT);

	tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist1);
	/* read each threshold back with its own mask, mirroring the packing
	 * done in fman_fpm_init() */
	cfg->prs_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PRS_MASK) >> FPM_THR1_PRS_SHIFT);
	cfg->kg_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_KG_MASK) >> FPM_THR1_KG_SHIFT);
	cfg->plcr_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_PLCR_MASK) >> FPM_THR1_PLCR_SHIFT);
	cfg->bmi_disp_tsh = (uint8_t)((tmp_reg & FPM_THR1_BMI_MASK) >> FPM_THR1_BMI_SHIFT);

	tmp_reg = ioread32be(&fman_rg->fpm_rg->fmfp_dist2);
	cfg->qmi_enq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_ENQ_MASK) >> FPM_THR2_QMI_ENQ_SHIFT);
	cfg->qmi_deq_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_QMI_DEQ_MASK) >> FPM_THR2_QMI_DEQ_SHIFT);
	cfg->fm_ctl1_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL1_MASK) >> FPM_THR2_FM_CTL1_SHIFT);
	cfg->fm_ctl2_disp_tsh = (uint8_t)((tmp_reg & FPM_THR2_FM_CTL2_MASK) >> FPM_THR2_FM_CTL2_SHIFT);

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmsetr);
	cfg->dma_sos_emergency = tmp_reg;

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmwcr);
	cfg->dma_watchdog = tmp_reg / cfg->clk_freq;

	tmp_reg = ioread32be(&fman_rg->dma_rg->fmdmemsr);
	cfg->dma_en_emergency_smoother =
		(bool)((tmp_reg & DMA_EMSR_EMSTR_MASK) ? TRUE : FALSE);
	cfg->dma_emergency_switch_counter = (tmp_reg & DMA_EMSR_EMSTR_MASK);
}

void fman_reset(struct fman_fpm_regs *fpm_rg)
{
	iowrite32be(FPM_RSTC_FM_RESET, &fpm_rg->fm_rstc);
}

/**************************************************************************//**
 @Function      fman_dma_init

 @Description   Initializes the FM DMA unit registers

 @Param[in]     dma_rg - FM DMA register block
 @Param[in]     cfg    - FM configuration

 @Return        0 on success; error code otherwise.
*//***************************************************************************/
int fman_dma_init(struct fman_dma_regs *dma_rg, struct fman_cfg *cfg)
{
	uint32_t	tmp_reg;

	/**********************/
	/* Init DMA Registers */
	/**********************/
	/* clear status reg events */
	tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
			DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
	iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg,
			&dma_rg->fmdmsr);

	/* configure mode register */
	tmp_reg = 0;
	tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
	if (cfg->dma_aid_override)
		tmp_reg |= DMA_MODE_AID_OR;
	if (cfg->exceptions & FMAN_EX_DMA_BUS_ERROR)
		tmp_reg |= DMA_MODE_BER;
	if ((cfg->exceptions & FMAN_EX_DMA_SYSTEM_WRITE_ECC) |
		(cfg->exceptions & FMAN_EX_DMA_READ_ECC) |
		(cfg->exceptions & FMAN_EX_DMA_FM_WRITE_ECC))
		tmp_reg |= DMA_MODE_ECC;
	if (cfg->dma_stop_on_bus_error)
		tmp_reg |= DMA_MODE_SBER;
	if (cfg->dma_axi_dbg_num_of_beats)
		tmp_reg |= (uint32_t)(DMA_MODE_AXI_DBG_MASK &
			((cfg->dma_axi_dbg_num_of_beats - 1) <<
				DMA_MODE_AXI_DBG_SHIFT));

	if (cfg->dma_en_emergency) {
		tmp_reg |= cfg->dma_emergency_bus_select;
		tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
		if (cfg->dma_en_emergency_smoother)
			iowrite32be(cfg->dma_emergency_switch_counter,
					&dma_rg->fmdmemsr);
	}
	tmp_reg |= ((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) <<
			DMA_MODE_CEN_SHIFT;
	tmp_reg |= DMA_MODE_SECURE_PROT;
	tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
	tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;

	if (cfg->pedantic_dma)
		tmp_reg |= DMA_MODE_EMER_READ;

	iowrite32be(tmp_reg, &dma_rg->fmdmmr);

	/* configure thresholds register */
	tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_asrt_emer <<
			DMA_THRESH_COMMQ_SHIFT) |
			((uint32_t)cfg->dma_read_buf_tsh_asrt_emer <<
			DMA_THRESH_READ_INT_BUF_SHIFT) |
			((uint32_t)cfg->dma_write_buf_tsh_asrt_emer);

	iowrite32be(tmp_reg, &dma_rg->fmdmtr);

	/* configure hysteresis register */
	tmp_reg = ((uint32_t)cfg->dma_comm_qtsh_clr_emer <<
		DMA_THRESH_COMMQ_SHIFT) |
		((uint32_t)cfg->dma_read_buf_tsh_clr_emer <<
		DMA_THRESH_READ_INT_BUF_SHIFT) |
		((uint32_t)cfg->dma_write_buf_tsh_clr_emer);

	iowrite32be(tmp_reg, &dma_rg->fmdmhy);

	/* configure emergency threshold */
	iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);

	/* configure watchdog */
	iowrite32be((cfg->dma_watchdog * cfg->clk_freq),
			&dma_rg->fmdmwcr);

	iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);

	return 0;
}

int fman_fpm_init(struct fman_fpm_regs *fpm_rg, struct fman_cfg *cfg)
{
	uint32_t tmp_reg;
	int i;

	/**********************/
	/* Init FPM Registers */
	/**********************/
	tmp_reg = (uint32_t)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
	iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);

	tmp_reg = (((uint32_t)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
		((uint32_t)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
		((uint32_t)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
		((uint32_t)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
	iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);

	tmp_reg = (((uint32_t)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
		((uint32_t)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
		((uint32_t)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
		((uint32_t)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
	iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);

	/* define exceptions and error behavior */
	tmp_reg = 0;
	/* clear events */
	tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
		FPM_EV_MASK_SINGLE_ECC);
	/* enable interrupts */
	if (cfg->exceptions & FMAN_EX_FPM_STALL_ON_TASKS)
		tmp_reg |= FPM_EV_MASK_STALL_EN;
	if (cfg->exceptions & FMAN_EX_FPM_SINGLE_ECC)
		tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
	if (cfg->exceptions & FMAN_EX_FPM_DOUBLE_ECC)
		tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
	tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
	tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
	if (!cfg->halt_on_external_activ)
		tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
	if (!cfg->halt_on_unrecov_ecc_err)
		tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
	iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);

	/* clear all fmCtls event registers */
	for (i = 0; i < cfg->num_of_fman_ctrl_evnt_regs; i++)
		iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);

	/* RAM ECC - enable and clear events */
	/* first we need to clear all parser memory,
	 * as it is uninitialized and may cause ECC errors */
	/* event bits */
	tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
	/* RAMs enable is not affected by the RCR bit, but by a COP
	 * configuration */
	if (cfg->external_ecc_rams_enable)
		tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;

	/* enable test mode */
	if (cfg->en_muram_test_mode)
		tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
	if (cfg->en_iram_test_mode)
		tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
	iowrite32be(tmp_reg, &fpm_rg->fm_rcr);

	tmp_reg = 0;
	if (cfg->exceptions & FMAN_EX_IRAM_ECC) {
		tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
		fman_enable_rams_ecc(fpm_rg);
	}
	if (cfg->exceptions & FMAN_EX_NURAM_ECC) {
		tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
		fman_enable_rams_ecc(fpm_rg);
	}
	iowrite32be(tmp_reg, &fpm_rg->fm_rie);

	return 0;
}

int fman_bmi_init(struct fman_bmi_regs *bmi_rg, struct fman_cfg *cfg)
{
	uint32_t tmp_reg;

	/**********************/
	/* Init BMI Registers */
	/**********************/

	/* define common resources */
	tmp_reg = cfg->fifo_base_addr;
	tmp_reg = tmp_reg / BMI_FIFO_ALIGN;

	tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
			BMI_CFG1_FIFO_SIZE_SHIFT);
	iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);

	tmp_reg = ((uint32_t)(cfg->total_num_of_tasks - 1) <<
			BMI_CFG2_TASKS_SHIFT);
	/* num of DMAs will be dynamically updated when each port is set */
	iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);

	/* define unmaskable exceptions, enable and clear events */
	tmp_reg = 0;
	iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
			BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
			BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
			BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
			&bmi_rg->fmbm_ievr);

	if (cfg->exceptions & FMAN_EX_BMI_LIST_RAM_ECC)
		tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
	if (cfg->exceptions & FMAN_EX_BMI_PIPELINE_ECC)
		tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
	if (cfg->exceptions & FMAN_EX_BMI_STATISTICS_RAM_ECC)
		tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
	if (cfg->exceptions & FMAN_EX_BMI_DISPATCH_RAM_ECC)
		tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
	iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);

	return 0;
}

int fman_qmi_init(struct fman_qmi_regs *qmi_rg, struct fman_cfg *cfg)
{
	uint32_t tmp_reg;
	uint16_t period_in_fm_clocks;
	uint8_t remainder;

	/**********************/
	/* Init QMI Registers */
	/**********************/
	/* clear error interrupt events */
	iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
			&qmi_rg->fmqm_eie);
	tmp_reg = 0;
	if (cfg->exceptions & FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
		tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
	if (cfg->exceptions & FMAN_EX_QMI_DOUBLE_ECC)
		tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
	/* enable events */
	iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);

	if (cfg->tnum_aging_period) {
		/* tnum_aging_period is in microseconds, clk_freq in MHz */
		period_in_fm_clocks = (uint16_t)
				(cfg->tnum_aging_period * cfg->clk_freq);
		/* period_in_fm_clocks must be a multiple of 64 */
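		/* e.g. (assuming a 10 us period at a 600 MHz FM clock):
		 * 6000 clocks, and ceil(6000 / 64) = 94 is programmed */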
		remainder = (uint8_t)(period_in_fm_clocks % 64);
		if (remainder)
			tmp_reg = (uint32_t)((period_in_fm_clocks / 64) + 1);
		else {
			tmp_reg = (uint32_t)(period_in_fm_clocks / 64);
			if (!tmp_reg)
				tmp_reg = 1;
		}
		tmp_reg <<= QMI_TAPC_TAP;
		iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
	}
	tmp_reg = 0;
	/* clear interrupt events */
	iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
	if (cfg->exceptions & FMAN_EX_QMI_SINGLE_ECC)
		tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
	/* enable events */
	iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);

	return 0;
}

int fman_enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
{
	uint32_t cfg_reg = 0;

	/**********************/
	/* Enable all modules */
	/**********************/
	/* clear & enable global counters - calculate reg and save for later,
	 * because it's the same reg for QMI enable */
	cfg_reg = QMI_CFG_EN_COUNTERS;
	if (cfg->qmi_deq_option_support)
		cfg_reg |= (uint32_t)(((cfg->qmi_def_tnums_thresh) << 8) |
				(uint32_t)cfg->qmi_def_tnums_thresh);

	iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
	iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
			&fman_rg->qmi_rg->fmqm_gc);

	return 0;
}

void fman_free_resources(struct fman_rg *fman_rg)
{
	/* disable BMI and QMI */
	iowrite32be(0, &fman_rg->bmi_rg->fmbm_init);
	iowrite32be(0, &fman_rg->qmi_rg->fmqm_gc);

	/* release BMI resources */
	iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg2);
	iowrite32be(0, &fman_rg->bmi_rg->fmbm_cfg1);

	/* disable ECC */
	iowrite32be(0, &fman_rg->fpm_rg->fm_rcr);
}

/****************************************************/
/*       API Run-time Control unit functions        */
/****************************************************/
uint32_t fman_get_normal_pending(struct fman_fpm_regs *fpm_rg)
{
	return ioread32be(&fpm_rg->fm_npi);
}

uint32_t fman_get_controller_event(struct fman_fpm_regs *fpm_rg, uint8_t reg_id)
{
	uint32_t event;

	event = ioread32be(&fpm_rg->fmfp_fcev[reg_id]) &
			ioread32be(&fpm_rg->fmfp_cee[reg_id]);
	iowrite32be(event, &fpm_rg->fmfp_cev[reg_id]);

	return event;
}

uint32_t fman_get_error_pending(struct fman_fpm_regs *fpm_rg)
{
	return ioread32be(&fpm_rg->fm_epi);
}

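/*
 * Each arbitration register packs eight 4-bit weight fields, with the
 * lowest port index of the group in the most significant nibble; a weight
 * of w is stored as (w - 1), so weight 1 needs no write at all.
 */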
void fman_set_ports_bandwidth(struct fman_bmi_regs *bmi_rg, uint8_t *weights)
{
	int i;
	uint8_t shift;
	uint32_t tmp = 0;

	for (i = 0; i < 64; i++) {
		if (weights[i] > 1) { /* no need to write 1 since it is 0 */
			/* Add this port to tmp_reg */
			/* (each 8 ports result in one register) */
			shift = (uint8_t)(32 - 4 * ((i % 8) + 1));
			tmp |= ((weights[i] - 1) << shift);
		}
		if (i % 8 == 7) { /* last in this set */
			iowrite32be(tmp, &bmi_rg->fmbm_arb[i / 8]);
			tmp = 0;
		}
	}
}

void fman_enable_rams_ecc(struct fman_fpm_regs *fpm_rg)
{
	uint32_t tmp;

	tmp = ioread32be(&fpm_rg->fm_rcr);
	if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
		iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN,
				&fpm_rg->fm_rcr);
	else
		iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
				FPM_RAM_IRAM_ECC_EN,
				&fpm_rg->fm_rcr);
}

void fman_disable_rams_ecc(struct fman_fpm_regs *fpm_rg)
{
	uint32_t tmp;

	tmp = ioread32be(&fpm_rg->fm_rcr);
	if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
		iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN,
				&fpm_rg->fm_rcr);
	else
		iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
				&fpm_rg->fm_rcr);
}

int fman_set_exception(struct fman_rg *fman_rg,
			enum fman_exceptions exception,
			bool enable)
{
	uint32_t tmp;

	switch (exception) {
	case(E_FMAN_EX_DMA_BUS_ERROR):
		tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
		if (enable)
			tmp |= DMA_MODE_BER;
		else
			tmp &= ~DMA_MODE_BER;
		iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
		break;
	case(E_FMAN_EX_DMA_READ_ECC):
	case(E_FMAN_EX_DMA_SYSTEM_WRITE_ECC):
	case(E_FMAN_EX_DMA_FM_WRITE_ECC):
		tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
		if (enable)
			tmp |= DMA_MODE_ECC;
		else
			tmp &= ~DMA_MODE_ECC;
		iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
		break;
	case(E_FMAN_EX_FPM_STALL_ON_TASKS):
		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
		if (enable)
			tmp |= FPM_EV_MASK_STALL_EN;
		else
			tmp &= ~FPM_EV_MASK_STALL_EN;
		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
		break;
	case(E_FMAN_EX_FPM_SINGLE_ECC):
		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
		if (enable)
			tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
		else
			tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
		break;
	case(E_FMAN_EX_FPM_DOUBLE_ECC):
		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
		if (enable)
			tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
		else
			tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
		break;
	case(E_FMAN_EX_QMI_SINGLE_ECC):
		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
		if (enable)
			tmp |= QMI_INTR_EN_SINGLE_ECC;
		else
			tmp &= ~QMI_INTR_EN_SINGLE_ECC;
		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
		break;
	case(E_FMAN_EX_QMI_DOUBLE_ECC):
		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
		if (enable)
			tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
		else
			tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
		break;
	case(E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID):
		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
		if (enable)
			tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
		else
			tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
		break;
	case(E_FMAN_EX_BMI_LIST_RAM_ECC):
		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
		if (enable)
			tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
		else
			tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
		break;
	case(E_FMAN_EX_BMI_STORAGE_PROFILE_ECC):
		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
		if (enable)
			tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
		else
			tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
		break;
	case(E_FMAN_EX_BMI_STATISTICS_RAM_ECC):
		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
		if (enable)
			tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
		else
			tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
		break;
	case(E_FMAN_EX_BMI_DISPATCH_RAM_ECC):
		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
		if (enable)
			tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
		else
			tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
		break;
	case(E_FMAN_EX_IRAM_ECC):
		tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
		if (enable) {
			/* enable ECC if not enabled */
			fman_enable_rams_ecc(fman_rg->fpm_rg);
			/* enable ECC interrupts */
			tmp |= FPM_IRAM_ECC_ERR_EX_EN;
		} else {
			/* ECC mechanism may be disabled,
			 * depending on driver status */
			fman_disable_rams_ecc(fman_rg->fpm_rg);
			tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
		}
		iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
		break;
	case(E_FMAN_EX_MURAM_ECC):
		tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
		if (enable) {
			/* enable ECC if not enabled */
			fman_enable_rams_ecc(fman_rg->fpm_rg);
			/* enable ECC interrupts */
			tmp |= FPM_MURAM_ECC_ERR_EX_EN;
		} else {
			/* ECC mechanism may be disabled,
			 * depending on driver status */
			fman_disable_rams_ecc(fman_rg->fpm_rg);
			tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
		}
		iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

void fman_get_revision(struct fman_fpm_regs *fpm_rg,
			uint8_t *major,
			uint8_t *minor)
{
	uint32_t tmp;

	tmp = ioread32be(&fpm_rg->fm_ip_rev_1);
	*major = (uint8_t)((tmp & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT);
	*minor = (uint8_t)((tmp & FPM_REV1_MINOR_MASK) >> FPM_REV1_MINOR_SHIFT);
}

uint32_t fman_get_counter(struct fman_rg *fman_rg,
				enum fman_counters reg_name)
{
	uint32_t ret_val;

	switch (reg_name) {
	case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_etfc);
		break;
	case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dtfc);
		break;
	case(E_FMAN_COUNTERS_DEQ_0):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc0);
		break;
	case(E_FMAN_COUNTERS_DEQ_1):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc1);
		break;
	case(E_FMAN_COUNTERS_DEQ_2):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc2);
		break;
	case(E_FMAN_COUNTERS_DEQ_3):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dc3);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfdc);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dfcc);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_FD):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dffc);
		break;
	case(E_FMAN_COUNTERS_DEQ_CONFIRM):
		ret_val = ioread32be(&fman_rg->qmi_rg->fmqm_dcc);
		break;
	default:
		ret_val = 0;
	}
	return ret_val;
}

int fman_modify_counter(struct fman_rg *fman_rg,
			enum fman_counters reg_name,
			uint32_t val)
{
	/* When applicable (when there is an 'enable counters' bit),
	 * check that counters are enabled */
	switch (reg_name) {
	case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
	case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
	case(E_FMAN_COUNTERS_DEQ_0):
	case(E_FMAN_COUNTERS_DEQ_1):
	case(E_FMAN_COUNTERS_DEQ_2):
	case(E_FMAN_COUNTERS_DEQ_3):
	case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
	case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
	case(E_FMAN_COUNTERS_DEQ_FROM_FD):
	case(E_FMAN_COUNTERS_DEQ_CONFIRM):
		if (!(ioread32be(&fman_rg->qmi_rg->fmqm_gc) &
				QMI_CFG_EN_COUNTERS))
			return -EINVAL;
		break;
	default:
		break;
	}
	/* Set counter */
	switch (reg_name) {
	case(E_FMAN_COUNTERS_ENQ_TOTAL_FRAME):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_etfc);
		break;
	case(E_FMAN_COUNTERS_DEQ_TOTAL_FRAME):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dtfc);
		break;
	case(E_FMAN_COUNTERS_DEQ_0):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc0);
		break;
	case(E_FMAN_COUNTERS_DEQ_1):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc1);
		break;
	case(E_FMAN_COUNTERS_DEQ_2):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc2);
		break;
	case(E_FMAN_COUNTERS_DEQ_3):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dc3);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_DEFAULT):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfdc);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_CONTEXT):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dfcc);
		break;
	case(E_FMAN_COUNTERS_DEQ_FROM_FD):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dffc);
		break;
	case(E_FMAN_COUNTERS_DEQ_CONFIRM):
		iowrite32be(val, &fman_rg->qmi_rg->fmqm_dcc);
		break;
	case(E_FMAN_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT):
		iowrite32be(val, &fman_rg->dma_rg->fmdmsefrc);
		break;
	case(E_FMAN_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT):
		iowrite32be(val, &fman_rg->dma_rg->fmdmsqfrc);
		break;
	case(E_FMAN_COUNTERS_SEMAPHOR_SYNC_REJECT):
		iowrite32be(val, &fman_rg->dma_rg->fmdmssrc);
		break;
	default:
		break;
	}
	return 0;
}

void fman_set_dma_emergency(struct fman_dma_regs *dma_rg,
				bool is_write,
				bool enable)
{
	uint32_t msk;

	msk = (uint32_t)(is_write ? DMA_MODE_EMER_WRITE : DMA_MODE_EMER_READ);

	if (enable)
		iowrite32be(ioread32be(&dma_rg->fmdmmr) | msk,
				&dma_rg->fmdmmr);
	else /* disable */
		iowrite32be(ioread32be(&dma_rg->fmdmmr) & ~msk,
				&dma_rg->fmdmmr);
}

void fman_set_dma_ext_bus_pri(struct fman_dma_regs *dma_rg, uint32_t pri)
{
	uint32_t tmp;

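	/* note: the priority bits are OR'ed into the mode register;
	 * previously set priority bits are not cleared here */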
	tmp = ioread32be(&dma_rg->fmdmmr) |
			(pri << DMA_MODE_BUS_PRI_SHIFT);

	iowrite32be(tmp, &dma_rg->fmdmmr);
}

uint32_t fman_get_dma_status(struct fman_dma_regs *dma_rg)
{
	return ioread32be(&dma_rg->fmdmsr);
}

void fman_force_intr(struct fman_rg *fman_rg,
		enum fman_exceptions exception)
{
	switch (exception) {
	case E_FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
		iowrite32be(QMI_ERR_INTR_EN_DEQ_FROM_DEF,
				&fman_rg->qmi_rg->fmqm_eif);
		break;
	case E_FMAN_EX_QMI_SINGLE_ECC:
		iowrite32be(QMI_INTR_EN_SINGLE_ECC,
				&fman_rg->qmi_rg->fmqm_if);
		break;
	case E_FMAN_EX_QMI_DOUBLE_ECC:
		iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC,
				&fman_rg->qmi_rg->fmqm_eif);
		break;
	case E_FMAN_EX_BMI_LIST_RAM_ECC:
		iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC,
				&fman_rg->bmi_rg->fmbm_ifr);
		break;
	case E_FMAN_EX_BMI_STORAGE_PROFILE_ECC:
		iowrite32be(BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC,
				&fman_rg->bmi_rg->fmbm_ifr);
		break;
	case E_FMAN_EX_BMI_STATISTICS_RAM_ECC:
		iowrite32be(BMI_ERR_INTR_EN_STATISTICS_RAM_ECC,
				&fman_rg->bmi_rg->fmbm_ifr);
		break;
	case E_FMAN_EX_BMI_DISPATCH_RAM_ECC:
		iowrite32be(BMI_ERR_INTR_EN_DISPATCH_RAM_ECC,
				&fman_rg->bmi_rg->fmbm_ifr);
		break;
	default:
		break;
	}
}

bool fman_is_qmi_halt_not_busy_state(struct fman_qmi_regs *qmi_rg)
{
	return (bool)!!(ioread32be(&qmi_rg->fmqm_gs) & QMI_GS_HALT_NOT_BUSY);
}

void fman_resume(struct fman_fpm_regs *fpm_rg)
{
	uint32_t tmp;

	tmp = ioread32be(&fpm_rg->fmfp_ee);
	/* clear the event bits so that standing events are not
	 * acknowledged by this write */
	tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
			FPM_EV_MASK_STALL |
			FPM_EV_MASK_SINGLE_ECC);
	tmp |= FPM_EV_MASK_RELEASE_FM;

	iowrite32be(tmp, &fpm_rg->fmfp_ee);
}
1399