/*
 * Copyright 2008-2012 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "common/general.h"

#include "fman_common.h"
#include "fsl_fman_port.h"


/* FIXME (Eyal): this definition does not belong here */
#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME    0x00000028

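/* Resolve the "no PCD" NIA for BMI frame enqueue: route frames through the
 * FM controller when the A006675 errata workaround is enabled, otherwise
 * enqueue directly through the BMI. */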
static uint32_t get_no_pcd_nia_bmi_ac_enc_frame(struct fman_port_cfg *cfg)
{
    if (cfg->errata_A006675)
        return NIA_ENG_FM_CTL |
            NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
    else
        return NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
}

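/* Initialize the BMI registers of an Rx port: port configuration, DMA
 * attributes, FIFO thresholds, frame-end data, internal context, buffer
 * margins, NIAs, default/error queues and counters. */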
static int init_bmi_rx(struct fman_port *port,
                       struct fman_port_cfg *cfg,
                       struct fman_port_params *params)
{
    struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;
    uint32_t tmp;

    /* Rx Configuration register */
    tmp = 0;
    if (port->im_en)
        tmp |= BMI_PORT_CFG_IM;
    else if (cfg->discard_override)
        tmp |= BMI_PORT_CFG_FDOVR;
    iowrite32be(tmp, &regs->fmbm_rcfg);

    /* DMA attributes */
    tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
    if (cfg->dma_ic_stash_on)
        tmp |= BMI_DMA_ATTR_IC_STASH_ON;
    if (cfg->dma_header_stash_on)
        tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
    if (cfg->dma_sg_stash_on)
        tmp |= BMI_DMA_ATTR_SG_STASH_ON;
    if (cfg->dma_write_optimize)
        tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
    iowrite32be(tmp, &regs->fmbm_rda);

    /* Rx FIFO parameters */
    tmp = (cfg->rx_pri_elevation / FMAN_PORT_BMI_FIFO_UNITS - 1) <<
        BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
    tmp |= cfg->rx_fifo_thr / FMAN_PORT_BMI_FIFO_UNITS - 1;
    iowrite32be(tmp, &regs->fmbm_rfp);

    if (cfg->excessive_threshold_register)
        /* always allow access to the extra resources */
        iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);

    /* Frame end data */
    tmp = (uint32_t)cfg->checksum_bytes_ignore <<
        BMI_RX_FRAME_END_CS_IGNORE_SHIFT;
    tmp |= (uint32_t)cfg->rx_cut_end_bytes <<
        BMI_RX_FRAME_END_CUT_SHIFT;
    if (cfg->errata_A006320)
        tmp &= 0xffe0ffff;
    iowrite32be(tmp, &regs->fmbm_rfed);

    /* Internal context parameters */
    tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
        BMI_IC_TO_EXT_SHIFT;
    tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
        BMI_IC_FROM_INT_SHIFT;
    tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
    iowrite32be(tmp, &regs->fmbm_ricp);

    /* Internal buffer offset */
    tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
        << BMI_INT_BUF_MARG_SHIFT;
    iowrite32be(tmp, &regs->fmbm_rim);

    /* External buffer margins */
    if (!port->im_en)
    {
        tmp = (uint32_t)cfg->ext_buf_start_margin <<
            BMI_EXT_BUF_MARG_START_SHIFT;
        tmp |= (uint32_t)cfg->ext_buf_end_margin;
        if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
            tmp |= BMI_SG_DISABLE;
        iowrite32be(tmp, &regs->fmbm_rebm);
    }

    /* Frame attributes */
    tmp = BMI_CMD_RX_MR_DEF;
    if (!port->im_en)
    {
        tmp |= BMI_CMD_ATTR_ORDER;
        tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
        if (cfg->sync_req)
            tmp |= BMI_CMD_ATTR_SYNC;
    }
    iowrite32be(tmp, &regs->fmbm_rfca);

    /* NIA */
    if (port->im_en)
        tmp = NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_RX;
    else
    {
        tmp = (uint32_t)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
        tmp |= get_no_pcd_nia_bmi_ac_enc_frame(cfg);
    }
    iowrite32be(tmp, &regs->fmbm_rfne);

    /* Enqueue NIA */
    iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);

    /* Default/error queues */
    if (!port->im_en)
    {
        iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_rfqid);
        iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_refqid);
    }

    /* Discard/error masks */
    iowrite32be(params->discard_mask, &regs->fmbm_rfsdm);
    iowrite32be(params->err_mask, &regs->fmbm_rfsem);

    /* Statistics counters */
    tmp = 0;
    if (cfg->stats_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_rstc);

    /* Performance counters */
    fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
    tmp = 0;
    if (cfg->perf_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_rpc);

    return 0;
}

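/* Initialize the BMI registers of a Tx port. In independent mode (im_en)
 * the dequeue/enqueue NIAs point at the FM controller; otherwise frames
 * flow through the QMI. */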
static int init_bmi_tx(struct fman_port *port,
                       struct fman_port_cfg *cfg,
                       struct fman_port_params *params)
{
    struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;
    uint32_t tmp;

    /* Tx Configuration register */
    tmp = 0;
    if (port->im_en)
        tmp |= BMI_PORT_CFG_IM;
    iowrite32be(tmp, &regs->fmbm_tcfg);

    /* DMA attributes */
    tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
    if (cfg->dma_ic_stash_on)
        tmp |= BMI_DMA_ATTR_IC_STASH_ON;
    if (cfg->dma_header_stash_on)
        tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
    if (cfg->dma_sg_stash_on)
        tmp |= BMI_DMA_ATTR_SG_STASH_ON;
    iowrite32be(tmp, &regs->fmbm_tda);

    /* Tx FIFO parameters */
    tmp = (cfg->tx_fifo_min_level / FMAN_PORT_BMI_FIFO_UNITS) <<
        BMI_TX_FIFO_MIN_FILL_SHIFT;
    tmp |= ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
        BMI_FIFO_PIPELINE_DEPTH_SHIFT;
    tmp |= (uint32_t)(cfg->tx_fifo_low_comf_level /
        FMAN_PORT_BMI_FIFO_UNITS - 1);
    iowrite32be(tmp, &regs->fmbm_tfp);

    /* Frame end data */
    tmp = (uint32_t)cfg->checksum_bytes_ignore <<
        BMI_FRAME_END_CS_IGNORE_SHIFT;
    iowrite32be(tmp, &regs->fmbm_tfed);

    /* Internal context parameters */
    if (!port->im_en)
    {
        tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
            BMI_IC_TO_EXT_SHIFT;
        tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
            BMI_IC_FROM_INT_SHIFT;
        tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
        iowrite32be(tmp, &regs->fmbm_ticp);
    }
    /* Frame attributes */
    tmp = BMI_CMD_TX_MR_DEF;
    if (port->im_en)
        tmp |= BMI_CMD_MR_DEAS;
    else
    {
        tmp |= BMI_CMD_ATTR_ORDER;
        tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
    }
    iowrite32be(tmp, &regs->fmbm_tfca);

    /* Dequeue NIA + enqueue NIA */
    if (port->im_en)
    {
        iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX,
                    &regs->fmbm_tfdne);
        iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX,
                    &regs->fmbm_tfene);
    }
    else
    {
        iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
        iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
        if (cfg->fmbm_tfne_has_features)
            iowrite32be(!params->dflt_fqid ?
                BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
                NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
        if (!params->dflt_fqid && params->dont_release_buf)
        {
            iowrite32be(0x00FFFFFF, &regs->fmbm_tcfqid);
            iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
                        &regs->fmbm_tfene);
            if (cfg->fmbm_tfne_has_features)
                iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
                            &regs->fmbm_tfne);
        }
    }

    /* Confirmation/error queues */
    if (!port->im_en)
    {
        if (params->dflt_fqid || !params->dont_release_buf)
            iowrite32be(params->dflt_fqid & 0x00FFFFFF, &regs->fmbm_tcfqid);
        iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_tefqid);
    }
    /* Statistics counters */
    tmp = 0;
    if (cfg->stats_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_tstc);

    /* Performance counters */
    fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
    tmp = 0;
    if (cfg->perf_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_tpc);

    return 0;
}

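/* Initialize the BMI registers of an offline-parsing (OP) or host-command
 * (HC) port; HC ports get a dedicated FM controller NIA and a plain QMI
 * enqueue NIA, without order restoration. */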
static int init_bmi_oh(struct fman_port *port,
                       struct fman_port_cfg *cfg,
                       struct fman_port_params *params)
{
    struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;
    uint32_t tmp;

    /* OP Configuration register */
    tmp = 0;
    if (cfg->discard_override)
        tmp |= BMI_PORT_CFG_FDOVR;
    iowrite32be(tmp, &regs->fmbm_ocfg);

    /* DMA attributes */
    tmp = (uint32_t)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
    if (cfg->dma_ic_stash_on)
        tmp |= BMI_DMA_ATTR_IC_STASH_ON;
    if (cfg->dma_header_stash_on)
        tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
    if (cfg->dma_sg_stash_on)
        tmp |= BMI_DMA_ATTR_SG_STASH_ON;
    if (cfg->dma_write_optimize)
        tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
    iowrite32be(tmp, &regs->fmbm_oda);

    /* Tx FIFO parameters */
    tmp = ((uint32_t)cfg->tx_fifo_deq_pipeline_depth - 1) <<
        BMI_FIFO_PIPELINE_DEPTH_SHIFT;
    iowrite32be(tmp, &regs->fmbm_ofp);

    /* Internal context parameters */
    tmp = ((uint32_t)cfg->ic_ext_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
        BMI_IC_TO_EXT_SHIFT;
    tmp |= ((uint32_t)cfg->ic_int_offset / FMAN_PORT_IC_OFFSET_UNITS) <<
        BMI_IC_FROM_INT_SHIFT;
    tmp |= cfg->ic_size / FMAN_PORT_IC_OFFSET_UNITS;
    iowrite32be(tmp, &regs->fmbm_oicp);

    /* Frame attributes */
    tmp = BMI_CMD_OP_MR_DEF;
    tmp |= (uint32_t)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
    if (cfg->sync_req)
        tmp |= BMI_CMD_ATTR_SYNC;
    if (port->type == E_FMAN_PORT_TYPE_OP)
        tmp |= BMI_CMD_ATTR_ORDER;
    iowrite32be(tmp, &regs->fmbm_ofca);

    /* Internal buffer offset */
    tmp = ((uint32_t)cfg->int_buf_start_margin / FMAN_PORT_IC_OFFSET_UNITS)
        << BMI_INT_BUF_MARG_SHIFT;
    iowrite32be(tmp, &regs->fmbm_oim);

    /* Dequeue NIA */
    iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_ofdne);

    /* NIA and Enqueue NIA */
    if (port->type == E_FMAN_PORT_TYPE_HC) {
        iowrite32be(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_HC,
                    &regs->fmbm_ofne);
        iowrite32be(NIA_ENG_QMI_ENQ, &regs->fmbm_ofene);
    } else {
        iowrite32be(get_no_pcd_nia_bmi_ac_enc_frame(cfg),
                    &regs->fmbm_ofne);
        iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR,
                    &regs->fmbm_ofene);
    }

    /* Default/error queues */
    iowrite32be((params->dflt_fqid & 0x00FFFFFF), &regs->fmbm_ofqid);
    iowrite32be((params->err_fqid & 0x00FFFFFF), &regs->fmbm_oefqid);

    /* Discard/error masks */
    if (port->type == E_FMAN_PORT_TYPE_OP) {
        iowrite32be(params->discard_mask, &regs->fmbm_ofsdm);
        iowrite32be(params->err_mask, &regs->fmbm_ofsem);
    }

    /* Statistics counters */
    tmp = 0;
    if (cfg->stats_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_ostc);

    /* Performance counters */
    fman_port_set_perf_cnt_params(port, &cfg->perf_cnt_params);
    tmp = 0;
    if (cfg->perf_counters_enable)
        tmp = BMI_COUNTERS_EN;
    iowrite32be(tmp, &regs->fmbm_opc);

    return 0;
}

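/* Initialize the QMI registers. Rx ports only need an enqueue NIA; Tx and
 * O/H ports also get a dequeue NIA and a dequeue configuration (priority,
 * type, optional prefetch, sub-portal and byte count). */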
static int init_qmi(struct fman_port *port,
                    struct fman_port_cfg *cfg,
                    struct fman_port_params *params)
{
    struct fman_port_qmi_regs *regs = port->qmi_regs;
    uint32_t tmp;

    tmp = 0;
    if (cfg->queue_counters_enable)
        tmp |= QMI_PORT_CFG_EN_COUNTERS;
    iowrite32be(tmp, &regs->fmqm_pnc);

    /* Rx port configuration */
    if ((port->type == E_FMAN_PORT_TYPE_RX) ||
        (port->type == E_FMAN_PORT_TYPE_RX_10G)) {
        /* Enqueue NIA */
        iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
        return 0;
    }

    /* Continue with Tx and O/H port configuration */
    if ((port->type == E_FMAN_PORT_TYPE_TX) ||
        (port->type == E_FMAN_PORT_TYPE_TX_10G)) {
        /* Enqueue NIA */
        iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
                    &regs->fmqm_pnen);
        /* Dequeue NIA */
        iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
    } else {
        /* Enqueue NIA */
        iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
        /* Dequeue NIA */
        iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_FETCH, &regs->fmqm_pndn);
    }

    /* Dequeue Configuration register */
    tmp = 0;
    if (cfg->deq_high_pri)
        tmp |= QMI_DEQ_CFG_PRI;

    switch (cfg->deq_type) {
    case E_FMAN_PORT_DEQ_BY_PRI:
        tmp |= QMI_DEQ_CFG_TYPE1;
        break;
    case E_FMAN_PORT_DEQ_ACTIVE_FQ:
        tmp |= QMI_DEQ_CFG_TYPE2;
        break;
    case E_FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
        tmp |= QMI_DEQ_CFG_TYPE3;
        break;
    default:
        return -EINVAL;
    }

    if (cfg->qmi_deq_options_support) {
        if ((port->type == E_FMAN_PORT_TYPE_HC) &&
            (cfg->deq_prefetch_opt != E_FMAN_PORT_DEQ_NO_PREFETCH))
            return -EINVAL;

        switch (cfg->deq_prefetch_opt) {
        case E_FMAN_PORT_DEQ_NO_PREFETCH:
            break;
        case E_FMAN_PORT_DEQ_PART_PREFETCH:
            tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
            break;
        case E_FMAN_PORT_DEQ_FULL_PREFETCH:
            tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
            break;
        default:
            return -EINVAL;
        }
    }
    tmp |= (uint32_t)(params->deq_sp & QMI_DEQ_CFG_SP_MASK) <<
        QMI_DEQ_CFG_SP_SHIFT;
    tmp |= cfg->deq_byte_cnt;
    iowrite32be(tmp, &regs->fmqm_pndc);

    return 0;
}

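/* The lookup helpers below map a counter enum to the matching BMI/QMI
 * register for the port type; *reg is set to NULL when the counter does
 * not exist for that port. */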
static void get_rx_stats_reg(struct fman_port *port,
                             enum fman_port_stats_counters counter,
                             uint32_t **stats_reg)
{
    struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;

    switch (counter) {
    case E_FMAN_PORT_STATS_CNT_FRAME:
        *stats_reg = &regs->fmbm_rfrc;
        break;
    case E_FMAN_PORT_STATS_CNT_DISCARD:
        *stats_reg = &regs->fmbm_rfdc;
        break;
    case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
        *stats_reg = &regs->fmbm_rbdc;
        break;
    case E_FMAN_PORT_STATS_CNT_RX_BAD_FRAME:
        *stats_reg = &regs->fmbm_rfbc;
        break;
    case E_FMAN_PORT_STATS_CNT_RX_LARGE_FRAME:
        *stats_reg = &regs->fmbm_rlfc;
        break;
    case E_FMAN_PORT_STATS_CNT_RX_OUT_OF_BUF:
        *stats_reg = &regs->fmbm_rodc;
        break;
    case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
        *stats_reg = &regs->fmbm_rffc;
        break;
    case E_FMAN_PORT_STATS_CNT_DMA_ERR:
        *stats_reg = &regs->fmbm_rfldec;
        break;
    default:
        *stats_reg = NULL;
    }
}

static void get_tx_stats_reg(struct fman_port *port,
                             enum fman_port_stats_counters counter,
                             uint32_t **stats_reg)
{
    struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;

    switch (counter) {
    case E_FMAN_PORT_STATS_CNT_FRAME:
        *stats_reg = &regs->fmbm_tfrc;
        break;
    case E_FMAN_PORT_STATS_CNT_DISCARD:
        *stats_reg = &regs->fmbm_tfdc;
        break;
    case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
        *stats_reg = &regs->fmbm_tbdc;
        break;
    case E_FMAN_PORT_STATS_CNT_LEN_ERR:
        *stats_reg = &regs->fmbm_tfledc;
        break;
    case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
        *stats_reg = &regs->fmbm_tfufdc;
        break;
    default:
        *stats_reg = NULL;
    }
}

static void get_oh_stats_reg(struct fman_port *port,
                             enum fman_port_stats_counters counter,
                             uint32_t **stats_reg)
{
    struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;

    switch (counter) {
    case E_FMAN_PORT_STATS_CNT_FRAME:
        *stats_reg = &regs->fmbm_ofrc;
        break;
    case E_FMAN_PORT_STATS_CNT_DISCARD:
        *stats_reg = &regs->fmbm_ofdc;
        break;
    case E_FMAN_PORT_STATS_CNT_DEALLOC_BUF:
        *stats_reg = &regs->fmbm_obdc;
        break;
    case E_FMAN_PORT_STATS_CNT_FILTERED_FRAME:
        *stats_reg = &regs->fmbm_offc;
        break;
    case E_FMAN_PORT_STATS_CNT_DMA_ERR:
        *stats_reg = &regs->fmbm_ofldec;
        break;
    case E_FMAN_PORT_STATS_CNT_LEN_ERR:
        *stats_reg = &regs->fmbm_ofledc;
        break;
    case E_FMAN_PORT_STATS_CNT_UNSUPPORTED_FORMAT:
        *stats_reg = &regs->fmbm_ofufdc;
        break;
    case E_FMAN_PORT_STATS_CNT_WRED_DISCARD:
        *stats_reg = &regs->fmbm_ofwdc;
        break;
    default:
        *stats_reg = NULL;
    }
}

static void get_rx_perf_reg(struct fman_port *port,
                            enum fman_port_perf_counters counter,
                            uint32_t **perf_reg)
{
    struct fman_port_rx_bmi_regs *regs = &port->bmi_regs->rx;

    switch (counter) {
    case E_FMAN_PORT_PERF_CNT_CYCLE:
        *perf_reg = &regs->fmbm_rccn;
        break;
    case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
        *perf_reg = &regs->fmbm_rtuc;
        break;
    case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
        *perf_reg = &regs->fmbm_rrquc;
        break;
    case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
        *perf_reg = &regs->fmbm_rduc;
        break;
    case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
        *perf_reg = &regs->fmbm_rfuc;
        break;
    case E_FMAN_PORT_PERF_CNT_RX_PAUSE:
        *perf_reg = &regs->fmbm_rpac;
        break;
    default:
        *perf_reg = NULL;
    }
}

static void get_tx_perf_reg(struct fman_port *port,
                            enum fman_port_perf_counters counter,
                            uint32_t **perf_reg)
{
    struct fman_port_tx_bmi_regs *regs = &port->bmi_regs->tx;

    switch (counter) {
    case E_FMAN_PORT_PERF_CNT_CYCLE:
        *perf_reg = &regs->fmbm_tccn;
        break;
    case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
        *perf_reg = &regs->fmbm_ttuc;
        break;
    case E_FMAN_PORT_PERF_CNT_QUEUE_UTIL:
        *perf_reg = &regs->fmbm_ttcquc;
        break;
    case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
        *perf_reg = &regs->fmbm_tduc;
        break;
    case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
        *perf_reg = &regs->fmbm_tfuc;
        break;
    default:
        *perf_reg = NULL;
    }
}

static void get_oh_perf_reg(struct fman_port *port,
                            enum fman_port_perf_counters counter,
                            uint32_t **perf_reg)
{
    struct fman_port_oh_bmi_regs *regs = &port->bmi_regs->oh;

    switch (counter) {
    case E_FMAN_PORT_PERF_CNT_CYCLE:
        *perf_reg = &regs->fmbm_occn;
        break;
    case E_FMAN_PORT_PERF_CNT_TASK_UTIL:
        *perf_reg = &regs->fmbm_otuc;
        break;
    case E_FMAN_PORT_PERF_CNT_DMA_UTIL:
        *perf_reg = &regs->fmbm_oduc;
        break;
    case E_FMAN_PORT_PERF_CNT_FIFO_UTIL:
        *perf_reg = &regs->fmbm_ofuc;
        break;
    default:
        *perf_reg = NULL;
    }
}

static void get_qmi_counter_reg(struct fman_port *port,
                                enum fman_port_qmi_counters counter,
                                uint32_t **queue_reg)
{
    struct fman_port_qmi_regs *regs = port->qmi_regs;

    switch (counter) {
    case E_FMAN_PORT_ENQ_TOTAL:
        *queue_reg = &regs->fmqm_pnetfc;
        break;
    case E_FMAN_PORT_DEQ_TOTAL:
        if ((port->type == E_FMAN_PORT_TYPE_RX) ||
            (port->type == E_FMAN_PORT_TYPE_RX_10G))
            /* Counter not available for Rx ports */
            *queue_reg = NULL;
        else
            *queue_reg = &regs->fmqm_pndtfc;
        break;
    case E_FMAN_PORT_DEQ_FROM_DFLT:
        if ((port->type == E_FMAN_PORT_TYPE_RX) ||
            (port->type == E_FMAN_PORT_TYPE_RX_10G))
            /* Counter not available for Rx ports */
            *queue_reg = NULL;
        else
            *queue_reg = &regs->fmqm_pndfdc;
        break;
    case E_FMAN_PORT_DEQ_CONFIRM:
        if ((port->type == E_FMAN_PORT_TYPE_RX) ||
            (port->type == E_FMAN_PORT_TYPE_RX_10G))
            /* Counter not available for Rx ports */
            *queue_reg = NULL;
        else
            *queue_reg = &regs->fmqm_pndcc;
        break;
    default:
        *queue_reg = NULL;
    }
}

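/* Fill a port configuration with default values; some defaults depend on
 * the port type (HC vs. the rest, 10G Tx vs. the rest). */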
void fman_port_defconfig(struct fman_port_cfg *cfg, enum fman_port_type type)
{
    cfg->dma_swap_data = E_FMAN_PORT_DMA_NO_SWAP;
    cfg->dma_ic_stash_on = FALSE;
    cfg->dma_header_stash_on = FALSE;
    cfg->dma_sg_stash_on = FALSE;
    cfg->dma_write_optimize = TRUE;
    cfg->color = E_FMAN_PORT_COLOR_GREEN;
    cfg->discard_override = FALSE;
    cfg->checksum_bytes_ignore = 0;
    cfg->rx_cut_end_bytes = 4;
    cfg->rx_pri_elevation = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
    cfg->rx_fifo_thr = ((0x3FF + 1) * FMAN_PORT_BMI_FIFO_UNITS);
    cfg->rx_fd_bits = 0;
    cfg->ic_ext_offset = 0;
    cfg->ic_int_offset = 0;
    cfg->ic_size = 0;
    cfg->int_buf_start_margin = 0;
    cfg->ext_buf_start_margin = 0;
    cfg->ext_buf_end_margin = 0;
    cfg->tx_fifo_min_level = 0;
    cfg->tx_fifo_low_comf_level = (5 * KILOBYTE);
    cfg->stats_counters_enable = TRUE;
    cfg->perf_counters_enable = TRUE;
    cfg->deq_type = E_FMAN_PORT_DEQ_BY_PRI;

    if (type == E_FMAN_PORT_TYPE_HC) {
        cfg->sync_req = FALSE;
        cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_NO_PREFETCH;
    } else {
        cfg->sync_req = TRUE;
        cfg->deq_prefetch_opt = E_FMAN_PORT_DEQ_FULL_PREFETCH;
    }

    if (type == E_FMAN_PORT_TYPE_TX_10G) {
        cfg->tx_fifo_deq_pipeline_depth = 4;
        cfg->deq_high_pri = TRUE;
        cfg->deq_byte_cnt = 0x1400;
    } else {
        if ((type == E_FMAN_PORT_TYPE_HC) ||
            (type == E_FMAN_PORT_TYPE_OP))
            cfg->tx_fifo_deq_pipeline_depth = 2;
        else
            cfg->tx_fifo_deq_pipeline_depth = 1;

        cfg->deq_high_pri = FALSE;
        cfg->deq_byte_cnt = 0x400;
    }
    cfg->no_scatter_gather = DEFAULT_FMAN_SP_NO_SCATTER_GATHER;
}

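/* Scan the external buffer pool registers for the pool with the given
 * bpid; returns its index, or the number of scanned pools if not found. */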
static uint8_t fman_port_find_bpool(struct fman_port *port, uint8_t bpid)
{
    uint32_t *bp_reg, tmp;
    uint8_t i, id;

    /* Find the pool */
    bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
    for (i = 0;
         (i < port->ext_pools_num && (i < FMAN_PORT_MAX_EXT_POOLS_NUM));
         i++) {
        tmp = ioread32be(&bp_reg[i]);
        id = (uint8_t)((tmp & BMI_EXT_BUF_POOL_ID_MASK) >>
                       BMI_EXT_BUF_POOL_ID_SHIFT);

        if (id == bpid)
            break;
    }

    return i;
}

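/* Initialize a port: program the BMI registers according to the port type,
 * then the QMI registers (unless the port runs in independent mode). */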
int fman_port_init(struct fman_port *port,
                   struct fman_port_cfg *cfg,
                   struct fman_port_params *params)
{
    int err;

    /* Init BMI registers */
    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        err = init_bmi_rx(port, cfg, params);
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        err = init_bmi_tx(port, cfg, params);
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        err = init_bmi_oh(port, cfg, params);
        break;
    default:
        return -EINVAL;
    }

    if (err)
        return err;

    /* Init QMI registers */
    if (!port->im_en)
    {
        err = init_qmi(port, cfg, params);
        return err;
    }
    return 0;
}

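/* Enable a port: for non-Rx ports enable the QMI side first, then set the
 * BMI enable bit. */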
int fman_port_enable(struct fman_port *port)
{
    uint32_t *bmi_cfg_reg, tmp;
    bool rx_port;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
        rx_port = TRUE;
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
        rx_port = FALSE;
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
        rx_port = FALSE;
        break;
    default:
        return -EINVAL;
    }

    /* Enable QMI */
    if (!rx_port) {
        tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
        iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
    }

    /* Enable BMI */
    tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
    iowrite32be(tmp, bmi_cfg_reg);

    return 0;
}

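/* Disable a port: clear the QMI enable bit (non-Rx ports) and wait for FD
 * handling to finish, then clear the BMI enable bit and poll for graceful
 * stop; returns -EBUSY if either wait times out. */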
int fman_port_disable(const struct fman_port *port)
{
    uint32_t *bmi_cfg_reg, *bmi_status_reg, tmp;
    bool rx_port, failure = FALSE;
    int count;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
        bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
        rx_port = TRUE;
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
        bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
        rx_port = FALSE;
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        bmi_cfg_reg = &port->bmi_regs->oh.fmbm_ocfg;
        bmi_status_reg = &port->bmi_regs->oh.fmbm_ost;
        rx_port = FALSE;
        break;
    default:
        return -EINVAL;
    }

    /* Disable QMI */
    if (!rx_port) {
        tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
        iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);

        /* Wait for QMI to finish FD handling */
        count = 100;
        do {
            DELAY(10);
            tmp = ioread32be(&port->qmi_regs->fmqm_pns);
        } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);

        if (count == 0)
        {
            /* Timeout */
            failure = TRUE;
        }
    }

    /* Disable BMI */
    tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
    iowrite32be(tmp, bmi_cfg_reg);

    /* Wait for graceful stop end */
    count = 500;
    do {
        DELAY(10);
        tmp = ioread32be(bmi_status_reg);
    } while ((tmp & BMI_PORT_STATUS_BSY) && --count);

    if (count == 0)
    {
        /* Timeout */
        failure = TRUE;
    }

    if (failure)
        return -EBUSY;

    return 0;
}

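/* Program the external buffer pools of an Rx port (or an OP port on FMan
 * v4): pool id/size/flags per pool, clearing of unused entries, and the
 * pool depletion register. Rx pools must be given in ascending size order. */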
int fman_port_set_bpools(const struct fman_port *port,
                         const struct fman_port_bpools *bp)
{
    uint32_t tmp, *bp_reg, *bp_depl_reg;
    uint8_t i, max_bp_num;
    bool grp_depl_used = FALSE, rx_port;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        max_bp_num = port->ext_pools_num;
        rx_port = TRUE;
        bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
        bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
        break;
    case E_FMAN_PORT_TYPE_OP:
        if (port->fm_rev_maj != 4)
            return -EINVAL;
        max_bp_num = FMAN_PORT_OBS_EXT_POOLS_NUM;
        rx_port = FALSE;
        bp_reg = port->bmi_regs->oh.fmbm_oebmpi;
        bp_depl_reg = &port->bmi_regs->oh.fmbm_ompd;
        break;
    default:
        return -EINVAL;
    }

    if (rx_port) {
        /* Check buffers are provided in ascending order */
        for (i = 0;
             (i < (bp->count-1) && (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1));
             i++) {
            if (bp->bpool[i].size > bp->bpool[i+1].size)
                return -EINVAL;
        }
    }

    /* Set up external buffers pools */
    for (i = 0; i < bp->count; i++) {
        tmp = BMI_EXT_BUF_POOL_VALID;
        tmp |= ((uint32_t)bp->bpool[i].bpid <<
                BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;

        if (rx_port) {
            if (bp->counters_enable)
                tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;

            if (bp->bpool[i].is_backup)
                tmp |= BMI_EXT_BUF_POOL_BACKUP;

            tmp |= (uint32_t)bp->bpool[i].size;
        }

        iowrite32be(tmp, &bp_reg[i]);
    }

    /* Clear unused pools */
    for (i = bp->count; i < max_bp_num; i++)
        iowrite32be(0, &bp_reg[i]);

    /* Pools depletion */
    tmp = 0;
    for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
        if (bp->bpool[i].grp_bp_depleted) {
            grp_depl_used = TRUE;
            tmp |= 0x80000000 >> i;
        }

        if (bp->bpool[i].single_bp_depleted)
            tmp |= 0x80 >> i;

        if (bp->bpool[i].pfc_priorities_en)
            tmp |= 0x0100 << i;
    }

    if (grp_depl_used)
        tmp |= ((uint32_t)bp->grp_bp_depleted_num - 1) <<
            BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;

    iowrite32be(tmp, bp_depl_reg);
    return 0;
}

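/* Configure the egress rate limiter. The rate is expressed in granularity
 * units; if it is too small for a 1 usec timestamp factor, the timestamp
 * scale bit (usec_bit) is shifted and the factor doubled until it fits. */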
int fman_port_set_rate_limiter(struct fman_port *port,
                               struct fman_port_rate_limiter *rate_limiter)
{
    uint32_t *rate_limit_reg, *rate_limit_scale_reg;
    uint32_t granularity, tmp;
    uint8_t usec_bit, factor;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        rate_limit_reg = &port->bmi_regs->tx.fmbm_trlmt;
        rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
        granularity = BMI_RATE_LIMIT_GRAN_TX;
        break;
    case E_FMAN_PORT_TYPE_OP:
        rate_limit_reg = &port->bmi_regs->oh.fmbm_orlmt;
        rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
        granularity = BMI_RATE_LIMIT_GRAN_OP;
        break;
    default:
        return -EINVAL;
    }

    /* Factor is per 1 usec count */
    factor = 1;
    usec_bit = rate_limiter->count_1micro_bit;

    /* If the rate limit is too small for a 1 usec factor, adjust the
     * timestamp scale and multiply the factor */
    while (rate_limiter->rate < (granularity / factor)) {
        if (usec_bit == 31)
            /* Can't configure rate limiter - rate is too small */
            return -EINVAL;

        usec_bit++;
        factor <<= 1;
    }

    /* Figure out the register value. The "while" loop above guarantees
     * that (rate_limiter->rate * factor / granularity) >= 1 */
    tmp = (uint32_t)(rate_limiter->rate * factor / granularity - 1);

    /* Check the rate limit isn't too large */
    if (tmp >= BMI_RATE_LIMIT_MAX_RATE_IN_GRAN_UNITS)
        return -EINVAL;

    /* Check the burst size is in the allowed range */
    if ((rate_limiter->burst_size == 0) ||
        (rate_limiter->burst_size > BMI_RATE_LIMIT_MAX_BURST_SIZE))
        return -EINVAL;

    tmp |= (uint32_t)(rate_limiter->burst_size - 1) <<
        BMI_RATE_LIMIT_MAX_BURST_SHIFT;

    if ((port->type == E_FMAN_PORT_TYPE_OP) &&
        (port->fm_rev_maj == 4)) {
        if (rate_limiter->high_burst_size_gran)
            tmp |= BMI_RATE_LIMIT_HIGH_BURST_SIZE_GRAN;
    }

    iowrite32be(tmp, rate_limit_reg);

    /* Set up the rate limiter scale register */
    tmp = BMI_RATE_LIMIT_SCALE_EN;
    tmp |= (31 - (uint32_t)usec_bit) << BMI_RATE_LIMIT_SCALE_TSBS_SHIFT;

    if ((port->type == E_FMAN_PORT_TYPE_OP) &&
        (port->fm_rev_maj == 4))
        tmp |= rate_limiter->rate_factor;

    iowrite32be(tmp, rate_limit_scale_reg);

    return 0;
}

int fman_port_delete_rate_limiter(struct fman_port *port)
{
    uint32_t *rate_limit_scale_reg;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        rate_limit_scale_reg = &port->bmi_regs->tx.fmbm_trlmts;
        break;
    case E_FMAN_PORT_TYPE_OP:
        rate_limit_scale_reg = &port->bmi_regs->oh.fmbm_orlmts;
        break;
    default:
        return -EINVAL;
    }

    iowrite32be(0, rate_limit_scale_reg);
    return 0;
}

int fman_port_set_err_mask(struct fman_port *port, uint32_t err_mask)
{
    uint32_t *err_mask_reg;

    /* Obtain register address */
    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        err_mask_reg = &port->bmi_regs->rx.fmbm_rfsem;
        break;
    case E_FMAN_PORT_TYPE_OP:
        err_mask_reg = &port->bmi_regs->oh.fmbm_ofsem;
        break;
    default:
        return -EINVAL;
    }

    iowrite32be(err_mask, err_mask_reg);
    return 0;
}

int fman_port_set_discard_mask(struct fman_port *port, uint32_t discard_mask)
{
    uint32_t *discard_mask_reg;

    /* Obtain register address */
    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        discard_mask_reg = &port->bmi_regs->rx.fmbm_rfsdm;
        break;
    case E_FMAN_PORT_TYPE_OP:
        discard_mask_reg = &port->bmi_regs->oh.fmbm_ofsdm;
        break;
    default:
        return -EINVAL;
    }

    iowrite32be(discard_mask, discard_mask_reg);
    return 0;
}

int fman_port_modify_rx_fd_bits(struct fman_port *port,
                                uint8_t rx_fd_bits,
                                bool add)
{
    uint32_t tmp;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        break;
    default:
        return -EINVAL;
    }

    tmp = ioread32be(&port->bmi_regs->rx.fmbm_rfne);

    if (add)
        tmp |= (uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
    else
        tmp &= ~((uint32_t)rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT);

    iowrite32be(tmp, &port->bmi_regs->rx.fmbm_rfne);
    return 0;
}

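/* Program the performance-counter compare thresholds (task, DMA, FIFO and,
 * for Rx/Tx ports, queue); all values are validated against the per-type
 * maxima before writing. */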
int fman_port_set_perf_cnt_params(struct fman_port *port,
                                  struct fman_port_perf_cnt_params *params)
{
    uint32_t *pcp_reg, tmp;

    /* Obtain register address and check parameters are in range */
    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        pcp_reg = &port->bmi_regs->rx.fmbm_rpcp;
        if ((params->queue_val == 0) ||
            (params->queue_val > MAX_PERFORMANCE_RX_QUEUE_COMP))
            return -EINVAL;
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        pcp_reg = &port->bmi_regs->tx.fmbm_tpcp;
        if ((params->queue_val == 0) ||
            (params->queue_val > MAX_PERFORMANCE_TX_QUEUE_COMP))
            return -EINVAL;
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        pcp_reg = &port->bmi_regs->oh.fmbm_opcp;
        if (params->queue_val != 0)
            return -EINVAL;
        break;
    default:
        return -EINVAL;
    }

    if ((params->task_val == 0) ||
        (params->task_val > MAX_PERFORMANCE_TASK_COMP))
        return -EINVAL;
    if ((params->dma_val == 0) ||
        (params->dma_val > MAX_PERFORMANCE_DMA_COMP))
        return -EINVAL;
    if ((params->fifo_val == 0) ||
        ((params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS) >
         MAX_PERFORMANCE_FIFO_COMP))
        return -EINVAL;
    tmp = (uint32_t)(params->task_val - 1) <<
        BMI_PERFORMANCE_TASK_COMP_SHIFT;
    tmp |= (uint32_t)(params->dma_val - 1) <<
        BMI_PERFORMANCE_DMA_COMP_SHIFT;
    tmp |= (uint32_t)(params->fifo_val / FMAN_PORT_BMI_FIFO_UNITS - 1);

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        tmp |= (uint32_t)(params->queue_val - 1) <<
            BMI_PERFORMANCE_QUEUE_COMP_SHIFT;
        break;
    default:
        break;
    }

    iowrite32be(tmp, pcp_reg);
    return 0;
}

int fman_port_set_stats_cnt_mode(struct fman_port *port, bool enable)
{
    uint32_t *stats_reg, tmp;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        stats_reg = &port->bmi_regs->rx.fmbm_rstc;
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        stats_reg = &port->bmi_regs->tx.fmbm_tstc;
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        stats_reg = &port->bmi_regs->oh.fmbm_ostc;
        break;
    default:
        return -EINVAL;
    }

    tmp = ioread32be(stats_reg);

    if (enable)
        tmp |= BMI_COUNTERS_EN;
    else
        tmp &= ~BMI_COUNTERS_EN;

    iowrite32be(tmp, stats_reg);
    return 0;
}

int fman_port_set_perf_cnt_mode(struct fman_port *port, bool enable)
{
    uint32_t *stats_reg, tmp;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        stats_reg = &port->bmi_regs->rx.fmbm_rpc;
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        stats_reg = &port->bmi_regs->tx.fmbm_tpc;
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        stats_reg = &port->bmi_regs->oh.fmbm_opc;
        break;
    default:
        return -EINVAL;
    }

    tmp = ioread32be(stats_reg);

    if (enable)
        tmp |= BMI_COUNTERS_EN;
    else
        tmp &= ~BMI_COUNTERS_EN;

    iowrite32be(tmp, stats_reg);
    return 0;
}

int fman_port_set_queue_cnt_mode(struct fman_port *port, bool enable)
{
    uint32_t tmp;

    tmp = ioread32be(&port->qmi_regs->fmqm_pnc);

    if (enable)
        tmp |= QMI_PORT_CFG_EN_COUNTERS;
    else
        tmp &= ~QMI_PORT_CFG_EN_COUNTERS;

    iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
    return 0;
}

int fman_port_set_bpool_cnt_mode(struct fman_port *port,
                                 uint8_t bpid,
                                 bool enable)
{
    uint8_t index;
    uint32_t tmp;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        break;
    default:
        return -EINVAL;
    }

    /* Find the pool */
    index = fman_port_find_bpool(port, bpid);
    if (index == port->ext_pools_num ||
        index == FMAN_PORT_MAX_EXT_POOLS_NUM)
        /* Not found */
        return -EINVAL;

    tmp = ioread32be(&port->bmi_regs->rx.fmbm_ebmpi[index]);

    if (enable)
        tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
    else
        tmp &= ~BMI_EXT_BUF_POOL_EN_COUNTER;

    iowrite32be(tmp, &port->bmi_regs->rx.fmbm_ebmpi[index]);
    return 0;
}

uint32_t fman_port_get_stats_counter(struct fman_port *port,
                                     enum fman_port_stats_counters counter)
{
    uint32_t *stats_reg, ret_val;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        get_rx_stats_reg(port, counter, &stats_reg);
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        get_tx_stats_reg(port, counter, &stats_reg);
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        get_oh_stats_reg(port, counter, &stats_reg);
        break;
    default:
        stats_reg = NULL;
    }

    if (stats_reg == NULL)
        return 0;

    ret_val = ioread32be(stats_reg);
    return ret_val;
}

void fman_port_set_stats_counter(struct fman_port *port,
                                 enum fman_port_stats_counters counter,
                                 uint32_t value)
{
    uint32_t *stats_reg;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        get_rx_stats_reg(port, counter, &stats_reg);
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        get_tx_stats_reg(port, counter, &stats_reg);
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        get_oh_stats_reg(port, counter, &stats_reg);
        break;
    default:
        stats_reg = NULL;
    }

    if (stats_reg == NULL)
        return;

    iowrite32be(value, stats_reg);
}

uint32_t fman_port_get_perf_counter(struct fman_port *port,
                                    enum fman_port_perf_counters counter)
{
    uint32_t *perf_reg, ret_val;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        get_rx_perf_reg(port, counter, &perf_reg);
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        get_tx_perf_reg(port, counter, &perf_reg);
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        get_oh_perf_reg(port, counter, &perf_reg);
        break;
    default:
        perf_reg = NULL;
    }

    if (perf_reg == NULL)
        return 0;

    ret_val = ioread32be(perf_reg);
    return ret_val;
}

void fman_port_set_perf_counter(struct fman_port *port,
                                enum fman_port_perf_counters counter,
                                uint32_t value)
{
    uint32_t *perf_reg;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        get_rx_perf_reg(port, counter, &perf_reg);
        break;
    case E_FMAN_PORT_TYPE_TX:
    case E_FMAN_PORT_TYPE_TX_10G:
        get_tx_perf_reg(port, counter, &perf_reg);
        break;
    case E_FMAN_PORT_TYPE_OP:
    case E_FMAN_PORT_TYPE_HC:
        get_oh_perf_reg(port, counter, &perf_reg);
        break;
    default:
        perf_reg = NULL;
    }

    if (perf_reg == NULL)
        return;

    iowrite32be(value, perf_reg);
}

uint32_t fman_port_get_qmi_counter(struct fman_port *port,
                                   enum fman_port_qmi_counters counter)
{
    uint32_t *queue_reg, ret_val;

    get_qmi_counter_reg(port, counter, &queue_reg);

    if (queue_reg == NULL)
        return 0;

    ret_val = ioread32be(queue_reg);
    return ret_val;
}

void fman_port_set_qmi_counter(struct fman_port *port,
                               enum fman_port_qmi_counters counter,
                               uint32_t value)
{
    uint32_t *queue_reg;

    get_qmi_counter_reg(port, counter, &queue_reg);

    if (queue_reg == NULL)
        return;

    iowrite32be(value, queue_reg);
}

uint32_t fman_port_get_bpool_counter(struct fman_port *port, uint8_t bpid)
{
    uint8_t index;
    uint32_t ret_val;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        break;
    default:
        return 0;
    }

    /* Find the pool */
    index = fman_port_find_bpool(port, bpid);
    if (index == port->ext_pools_num ||
        index == FMAN_PORT_MAX_EXT_POOLS_NUM)
        /* Not found */
        return 0;

    ret_val = ioread32be(&port->bmi_regs->rx.fmbm_acnt[index]);
    return ret_val;
}

void fman_port_set_bpool_counter(struct fman_port *port,
                                 uint8_t bpid,
                                 uint32_t value)
{
    uint8_t index;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        break;
    default:
        return;
    }

    /* Find the pool */
    index = fman_port_find_bpool(port, bpid);
    if (index == port->ext_pools_num ||
        index == FMAN_PORT_MAX_EXT_POOLS_NUM)
        /* Not found */
        return;

    iowrite32be(value, &port->bmi_regs->rx.fmbm_acnt[index]);
}

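/* Add/remove congestion groups: set or clear the requested bits in the
 * port's congestion group map registers. Rx ports on FMan v4 and OP ports
 * have a single map register; later revisions expose FMAN_PORT_CG_MAP_NUM
 * of them. */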
int fman_port_add_congestion_grps(struct fman_port *port,
                                  uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
{
    int i;
    uint32_t tmp, *grp_map_reg;
    uint8_t max_grp_map_num;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        if (port->fm_rev_maj == 4)
            max_grp_map_num = 1;
        else
            max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
        grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
        break;
    case E_FMAN_PORT_TYPE_OP:
        max_grp_map_num = 1;
        if (port->fm_rev_maj != 4)
            return -EINVAL;
        grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
        break;
    default:
        return -EINVAL;
    }

    for (i = (max_grp_map_num - 1); i >= 0; i--) {
        if (grps_map[i] == 0)
            continue;
        tmp = ioread32be(&grp_map_reg[i]);
        tmp |= grps_map[i];
        iowrite32be(tmp, &grp_map_reg[i]);
    }

    return 0;
}

int fman_port_remove_congestion_grps(struct fman_port *port,
                                     uint32_t grps_map[FMAN_PORT_CG_MAP_NUM])
{
    int i;
    uint32_t tmp, *grp_map_reg;
    uint8_t max_grp_map_num;

    switch (port->type) {
    case E_FMAN_PORT_TYPE_RX:
    case E_FMAN_PORT_TYPE_RX_10G:
        if (port->fm_rev_maj == 4)
            max_grp_map_num = 1;
        else
            max_grp_map_num = FMAN_PORT_CG_MAP_NUM;
        grp_map_reg = port->bmi_regs->rx.fmbm_rcgm;
        break;
    case E_FMAN_PORT_TYPE_OP:
        max_grp_map_num = 1;
        if (port->fm_rev_maj != 4)
            return -EINVAL;
        grp_map_reg = port->bmi_regs->oh.fmbm_ocgm;
        break;
    default:
        return -EINVAL;
    }

    for (i = (max_grp_map_num - 1); i >= 0; i--) {
        if (grps_map[i] == 0)
            continue;
        tmp = ioread32be(&grp_map_reg[i]);
        tmp &= ~grps_map[i];
        iowrite32be(tmp, &grp_map_reg[i]);
    }
    return 0;
}
