xref: /freebsd/sys/dev/bxe/ecore_init.h (revision 4ec234c813eed05c166859bba82c882e40826eb9)
1 /*-
2  * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
3  *
4  * Eric Davis        <edavis@broadcom.com>
5  * David Christensen <davidch@broadcom.com>
6  * Gary Zambrano     <zambrano@broadcom.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written consent.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #ifndef ECORE_INIT_H
38 #define ECORE_INIT_H
39 
40 /* Init operation types and structures */
enum {
	OP_RD = 0x1,	/* read a single register */
	OP_WR,		/* write a single register */
	OP_SW,		/* copy a string to the device */
	OP_ZR,		/* clear memory */
	OP_ZP,		/* unzip then copy with DMAE */
	OP_WR_64,	/* write 64 bit pattern */
	OP_WB,		/* copy a string using DMAE */
#ifndef FW_ZIP_SUPPORT
	OP_FW,		/* copy an array from fw data (only used with unzipped FW) */
#endif
	OP_WB_ZR,	/* Clear a string using DMAE or indirect-wr */
	OP_IF_MODE_OR,  /* Skip the following ops if all init modes don't match */
	OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
	OP_IF_PHASE,	/* conditional on init phase (struct op_if_phase) */
	OP_RT,		/* run-time register op (layout: struct raw_op) */
	OP_DELAY,	/* delay op (struct op_delay) */
	OP_VERIFY,	/* verify op */
	OP_MAX		/* must be last - number of init op types */
};
61 
/* selects the start or the end index in BLOCK_OPS_IDX() */
enum {
	STAGE_START,	/* index where the ops of a block/stage begin */
	STAGE_END,	/* index where the ops of a block/stage end */
};
66 
/* Returns the index of start or end of a specific block stage in ops array*/
/* Each (block, stage) pair owns two consecutive slots: start and end. */
#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
70 
71 
/* structs for the various opcodes */

/* generic layout shared by all ops: 8-bit opcode + 24-bit offset + payload */
struct raw_op {
	uint32_t op:8;		/* one of the OP_* values */
	uint32_t offset:24;	/* register offset */
	uint32_t raw_data;	/* op-specific payload */
};

/* OP_RD: read a single register */
struct op_read {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};

/* OP_WR and friends: write val to the register at offset */
struct op_write {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};

/* array-copy ops (OP_SW/OP_WB/...): data_off/data_len locate the source
 * array.  The 16-bit halves are swapped on big-endian hosts so the packed
 * 32-bit payload keeps the same layout as on little-endian.
 */
struct op_arr_write {
	uint32_t op:8;
	uint32_t offset:24;
#ifdef __BIG_ENDIAN
	uint16_t data_len;
	uint16_t data_off;
#else /* __LITTLE_ENDIAN */
	uint16_t data_off;
	uint16_t data_len;
#endif
};

/* OP_ZR / OP_WB_ZR: clear 'len' units of memory starting at offset */
struct op_zero {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t len;
};

/* OP_IF_MODE_OR/AND: conditionally skip following ops based on the
 * current init modes (presumably cmd_offset encodes how far to skip
 * — confirm with the init-ops interpreter).
 */
struct op_if_mode {
	uint32_t op:8;
	uint32_t cmd_offset:24;
	uint32_t mode_bit_map;	/* MODE_* bits to test */
};

/* OP_IF_PHASE: like op_if_mode but keyed on the init phase */
struct op_if_phase {
	uint32_t op:8;
	uint32_t cmd_offset:24;
	uint32_t phase_bit_map;
};

/* OP_DELAY: stall for 'delay' time units */
struct op_delay {
	uint32_t op:8;
	uint32_t reserved:24;
	uint32_t delay;
};

/* one entry of the init-ops array, viewed through any of the op layouts */
union init_op {
	struct op_read		read;
	struct op_write		write;
	struct op_arr_write	arr_wr;
	struct op_zero		zero;
	struct raw_op		raw;
	struct op_if_mode	if_mode;
	struct op_if_phase	if_phase;
	struct op_delay		delay;
};
137 
138 
/* Init Phases */
enum {
	PHASE_COMMON,	/* chip-common init */
	PHASE_PORT0,	/* per-port init, port 0 */
	PHASE_PORT1,	/* per-port init, port 1 */
	PHASE_PF0,	/* per-function init, PF0..PF7 */
	PHASE_PF1,
	PHASE_PF2,
	PHASE_PF3,
	PHASE_PF4,
	PHASE_PF5,
	PHASE_PF6,
	PHASE_PF7,
	NUM_OF_INIT_PHASES	/* must be last */
};
154 
/* Init Modes - bit flags, OR-ed together into the init-mode bit map
 * (tested via INIT_MODE_FLAGS()).
 */
enum {
	MODE_ASIC                      = 0x00000001,
	MODE_FPGA                      = 0x00000002,
	MODE_EMUL                      = 0x00000004,
	MODE_E2                        = 0x00000008,
	MODE_E3                        = 0x00000010,
	MODE_PORT2                     = 0x00000020,
	MODE_PORT4                     = 0x00000040,
	MODE_SF                        = 0x00000080,
	MODE_MF                        = 0x00000100,
	MODE_MF_SD                     = 0x00000200,
	MODE_MF_SI                     = 0x00000400,
	MODE_MF_AFEX                   = 0x00000800,
	MODE_E3_A0                     = 0x00001000,
	MODE_E3_B0                     = 0x00002000,
	MODE_COS3                      = 0x00004000,
	MODE_COS6                      = 0x00008000,
	MODE_LITTLE_ENDIAN             = 0x00010000,
	MODE_BIG_ENDIAN                = 0x00020000,
};
176 
/* Init Blocks - HW block indices, used e.g. with BLOCK_OPS_IDX() */
enum {
	BLOCK_ATC,
	BLOCK_BRB1,
	BLOCK_CCM,
	BLOCK_CDU,
	BLOCK_CFC,
	BLOCK_CSDM,
	BLOCK_CSEM,
	BLOCK_DBG,
	BLOCK_DMAE,
	BLOCK_DORQ,
	BLOCK_HC,
	BLOCK_IGU,
	BLOCK_MISC,
	BLOCK_NIG,
	BLOCK_PBF,
	BLOCK_PGLUE_B,
	BLOCK_PRS,
	BLOCK_PXP2,
	BLOCK_PXP,
	BLOCK_QM,
	BLOCK_SRC,
	BLOCK_TCM,
	BLOCK_TM,
	BLOCK_TSDM,
	BLOCK_TSEM,
	BLOCK_UCM,
	BLOCK_UPB,
	BLOCK_USDM,
	BLOCK_USEM,
	BLOCK_XCM,
	BLOCK_XPB,
	BLOCK_XSDM,
	BLOCK_XSEM,
	BLOCK_MISC_AEU,
	NUM_OF_INIT_BLOCKS	/* must be last */
};
215 
216 
217 
218 
219 
220 
221 
222 
/* Vnics per mode */
#define ECORE_PORT2_MODE_NUM_VNICS 4


/* QM queue numbers */
#define ECORE_ETH_Q		0
#define ECORE_TOE_Q		3
#define ECORE_TOE_ACK_Q		6
#define ECORE_ISCSI_Q		9
#define ECORE_ISCSI_ACK_Q	11
#define ECORE_FCOE_Q		10

/* Vnics per mode */
#define ECORE_PORT4_MODE_NUM_VNICS 2

/* COS offset for port1 in E3 B0 4port mode */
#define ECORE_E3B0_PORT1_COS_OFFSET 3

/* QM Register addresses */
/* per-queue VOQ index register: one 32-bit register per PF queue */
#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\
	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
/* VOQ->queue bit map: 32 queues per register, two registers per cos */
#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\
	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
/* command-queue byte-credit bit map: 16 queues per register */
#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\
	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))

/* extracts the QM queue number for the specified port and vnic */
#define ECORE_PF_Q_NUM(q_num, port, vnic)\
	((((port) << 1) | (vnic)) * 16 + (q_num))
252 
253 
/* Maps the specified queue to the specified COS */
/*
 * Reads the queue's current COS from the chip and, only when it differs
 * from new_cos, rewires the queue for every vnic of the port:
 *  - rewrites the queue->VOQ index register,
 *  - moves the queue's bit from the old COS VOQ bit map to the new one,
 *  - on E2/E3-A0 also updates the command-queue bit map (valid COS
 *    values there are only 0/1).
 */
static inline void ecore_map_q_cos(struct bxe_softc *sc, uint32_t q_num, uint32_t new_cos)
{
	/* find current COS mapping */
	uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4);

	/* check if queue->COS mapping has changed */
	if (curr_cos != new_cos) {
		uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS;
		uint32_t reg_addr, reg_bit_map, vnic;

		/* update parameters for 4port mode */
		if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
			num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
			/* port1's COS values live at a fixed offset */
			if (PORT_ID(sc)) {
				curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
				new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
			}
		}

		/* change queue mapping for each VNIC */
		for (vnic = 0; vnic < num_vnics; vnic++) {
			uint32_t pf_q_num =
				ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
			uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);

			/* overwrite queue->VOQ mapping */
			REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos);

			/* clear queue bit from current COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map));

			/* set queue bit in new COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map | q_bit_map);

			/* set/clear queue bit in command-queue bit map
			(E2/E3A0 only, valid COS values are 0/1) */
			if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) {
				reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num);
				reg_bit_map = REG_RD(sc, reg_addr);
				/* two bits per queue in this register */
				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
				reg_bit_map = new_cos ?
					      (reg_bit_map | q_bit_map) :
					      (reg_bit_map & (~q_bit_map));
				REG_WR(sc, reg_addr, reg_bit_map);
			}
		}
	}
}
307 
/* Configures the QM according to the specified per-traffic-type COSes */
/*
 * FCoE and iSCSI queues are always remapped; the network (L2/TOE)
 * queues are remapped only when not in STATIC_COS mode.
 */
static inline void ecore_dcb_config_qm(struct bxe_softc *sc, enum cos_mode mode,
				       struct priority_cos *traffic_cos)
{
	ecore_map_q_cos(sc, ECORE_FCOE_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
	ecore_map_q_cos(sc, ECORE_ISCSI_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q,
		traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	if (mode != STATIC_COS) {
		/* required only in OVERRIDE_COS mode */
		ecore_map_q_cos(sc, ECORE_ETH_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		ecore_map_q_cos(sc, ECORE_TOE_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		ecore_map_q_cos(sc, ECORE_TOE_ACK_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
	}
}
328 
329 
330 /*
331  * congestion management port init api description
332  * the api works as follows:
333  * the driver should pass the cmng_init_input struct, the port_init function
334  * will prepare the required internal ram structure which will be passed back
335  * to the driver (cmng_init) that will write it into the internal ram.
336  *
337  * IMPORTANT REMARKS:
338  * 1. the cmng_init struct does not represent the contiguous internal ram
339  *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
340  *    offset in order to write the port sub struct and the
341  *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
342  *    words - don't use memcpy!).
343  * 2. although the cmng_init struct is filled for the maximal vnic number
344  *    possible, the driver should only write the valid vnics into the internal
345  *    ram according to the appropriate port mode.
346  */
#define BITS_TO_BYTES(x) ((x)/8)

/* CMNG constants, as derived from system spec calculations */

/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
#define DEF_MIN_RATE 100

/* resolution of the rate shaping timer - 400 usec */
#define RS_PERIODIC_TIMEOUT_USEC 400

/*
 *  number of bytes in single QM arbitration cycle -
 *  coefficient for calculating the fairness timer
 */
#define QM_ARB_BYTES 160000

/* resolution of Min algorithm 1:100 */
#define MIN_RES 100

/*
 *  how many bytes above threshold for
 *  the minimal credit of Min algorithm
 */
#define MIN_ABOVE_THRESH 32768

/*
 *  Fairness algorithm integration time coefficient -
 *  for calculating the actual Tfair
 */
#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

/* Memory of fairness algorithm - 2 cycles */
#define FAIR_MEM 2

/* SAFC (priority flow control) timeout, in microseconds */
#define SAFC_TIMEOUT_USEC 52

/* number of microseconds per SDM tick */
#define SDM_TICKS 4
383 
384 
385 static inline void ecore_init_max(const struct cmng_init_input *input_data,
386 				  uint32_t r_param, struct cmng_init *ram_data)
387 {
388 	uint32_t vnic;
389 	struct cmng_vnic *vdata = &ram_data->vnic;
390 	struct cmng_struct_per_port *pdata = &ram_data->port;
391 	/*
392 	 * rate shaping per-port variables
393 	 *  100 micro seconds in SDM ticks = 25
394 	 *  since each tick is 4 microSeconds
395 	 */
396 
397 	pdata->rs_vars.rs_periodic_timeout =
398 	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
399 
400 	/* this is the threshold below which no timer arming will occur.
401 	 *  1.25 coefficient is for the threshold to be a little bigger
402 	 *  then the real time to compensate for timer in-accuracy
403 	 */
404 	pdata->rs_vars.rs_threshold =
405 	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
406 
407 	/* rate shaping per-vnic variables */
408 	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
409 		/* global vnic counter */
410 		vdata->vnic_max_rate[vnic].vn_counter.rate =
411 		input_data->vnic_max_rate[vnic];
412 		/*
413 		 * maximal Mbps for this vnic
414 		 * the quota in each timer period - number of bytes
415 		 * transmitted in this period
416 		 */
417 		vdata->vnic_max_rate[vnic].vn_counter.quota =
418 			RS_PERIODIC_TIMEOUT_USEC *
419 			(uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
420 	}
421 
422 }
423 
424 static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate,
425 				  struct rate_shaping_vars_per_vn *ram_data)
426 {
427 	/* global vnic counter */
428 	ram_data->vn_counter.rate = vnic_max_rate;
429 
430 	/*
431 	* maximal Mbps for this vnic
432 	* the quota in each timer period - number of bytes
433 	* transmitted in this period
434 	*/
435 	ram_data->vn_counter.quota =
436 		RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8;
437 }
438 
/*
 * Fill in the fairness (min-rate) section of the CMNG ram image.
 *
 * input_data - driver configuration (port rate, per-vnic min rates)
 * r_param    - bytes transmitted per usec at the port rate
 * ram_data   - output image, later written to internal ram
 */
static inline void ecore_init_min(const struct cmng_init_input *input_data,
				  uint32_t r_param, struct cmng_init *ram_data)
{
	uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
	struct cmng_vnic *vdata = &ram_data->vnic;
	struct cmng_struct_per_port *pdata = &ram_data->port;

	/* this is the resolution of the fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;

	/*
	 * fairness per-port variables
	 * for 10G it is 1000usec. for 1G it is 10000usec.
	 */
	tFair = T_FAIR_COEF / input_data->port_rate;

	/* this is the threshold below which we won't arm the timer anymore */
	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;

	/*
	 *  we multiply by 1e3/8 to get bytes/msec. We don't want the credits
	 *  to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
	 */
	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;

	/* since each tick is 4 microSeconds */
	pdata->fair_vars.fairness_timeout =
				fair_periodic_timeout_usec / SDM_TICKS;

	/* calculate sum of weights */
	vnicWeightSum = 0;

	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++)
		vnicWeightSum += input_data->vnic_min_rate[vnic];

	/* global vnic counter */
	if (vnicWeightSum > 0) {
		/* fairness per-vnic variables */
		for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
			/*
			 *  this is the credit for each period of the fairness
			 *  algorithm - number of bytes in T_FAIR (this vnic
			 *  share of the port rate)
			 */
			vdata->vnic_min_rate[vnic].vn_credit_delta =
				((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 *
				(T_FAIR_COEF / (8 * 100 * vnicWeightSum)));
			/* enforce the minimal credit of the Min algorithm */
			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
			    pdata->fair_vars.fair_threshold +
			    MIN_ABOVE_THRESH) {
				vdata->vnic_min_rate[vnic].vn_credit_delta =
					pdata->fair_vars.fair_threshold +
					MIN_ABOVE_THRESH;
			}
		}
	}
}
496 
/*
 * Fill in the per-COS weighted-round-robin credit deltas of the CMNG
 * ram image.  Done only when the COS weights sum to a non-zero value.
 *
 * input_data - driver configuration (per-COS min rates)
 * r_param    - bytes transmitted per usec at the port rate (unused here)
 * ram_data   - output image, later written to internal ram
 */
static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
				     uint32_t r_param, struct cmng_init *ram_data)
{
	uint32_t vnic, cos;
	uint32_t cosWeightSum = 0;
	struct cmng_vnic *vdata = &ram_data->vnic;
	struct cmng_struct_per_port *pdata = &ram_data->port;

	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
		cosWeightSum += input_data->cos_min_rate[cos];

	if (cosWeightSum > 0) {

		for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
			/*
			 *  Since cos and vnic shouldn't work together the rate
			 *  to divide between the coses is the port rate.
			 */
			uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
			for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
				/*
				 * this is the credit for each period of
				 * the fairness algorithm - number of bytes
				 * in T_FAIR (this cos share of the vnic rate)
				 */
				ccd[cos] =
				    ((uint32_t)input_data->cos_min_rate[cos] * 100 *
				    (T_FAIR_COEF / (8 * 100 * cosWeightSum)));
				/* enforce the minimal credit of the Min algorithm */
				 if (ccd[cos] < pdata->fair_vars.fair_threshold
						+ MIN_ABOVE_THRESH) {
					ccd[cos] =
					    pdata->fair_vars.fair_threshold +
					    MIN_ABOVE_THRESH;
				}
			}
		}
	}
}
535 
/*
 * Fill in the SAFC (priority flow control) section of the CMNG ram
 * image.  input_data is currently unused by this helper.
 */
static inline void ecore_init_safc(const struct cmng_init_input *input_data,
				   struct cmng_init *ram_data)
{
	/* in microSeconds */
	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
}
542 
543 /* Congestion management port init */
544 static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
545 				   struct cmng_init *ram_data)
546 {
547 	uint32_t r_param;
548 	ECORE_MEMSET(ram_data, 0,sizeof(struct cmng_init));
549 
550 	ram_data->port.flags = input_data->flags;
551 
552 	/*
553 	 *  number of bytes transmitted in a rate of 10Gbps
554 	 *  in one usec = 1.25KB.
555 	 */
556 	r_param = BITS_TO_BYTES(input_data->port_rate);
557 	ecore_init_max(input_data, r_param, ram_data);
558 	ecore_init_min(input_data, r_param, ram_data);
559 	ecore_init_fw_wrr(input_data, r_param, ram_data);
560 	ecore_init_safc(input_data, ram_data);
561 }
562 
563 
564 
565 
/* Returns the index of start or end of a specific block stage in ops array*/
/* NOTE(review): token-identical redefinition of BLOCK_OPS_IDX from earlier
 * in this header; benign under C rules for identical macro redefinition.
 */
#define BLOCK_OPS_IDX(block, stage, end) \
			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))


#define INITOP_SET		0	/* set the HW directly */
#define INITOP_CLEAR		1	/* clear the HW directly */
#define INITOP_INIT		2	/* set the init-value array */
574 
/****************************************************************************
* ILT management
****************************************************************************/
/* one ILT line: a host page handed to the chip */
struct ilt_line {
	ecore_dma_addr_t page_mapping;	/* DMA (bus) address of the page */
	void *page;			/* host virtual address of the page */
	uint32_t size;			/* page size in bytes */
};

/* per-client (CDU/QM/SRC/TM) range of ILT lines */
struct ilt_client_info {
	uint32_t page_size;
	uint16_t start;		/* first ILT line of this client */
	uint16_t end;		/* last ILT line of this client */
	uint16_t client_num;
	uint16_t flags;
#define ILT_CLIENT_SKIP_INIT	0x1	/* skip HW init for this client */
#define ILT_CLIENT_SKIP_MEM	0x2	/* skip memory allocation for the lines */
};

struct ecore_ilt {
	uint32_t start_line;	/* first ILT line owned by this function */
	struct ilt_line		*lines;
	struct ilt_client_info	clients[4];	/* indexed by ILT_CLIENT_* */
#define ILT_CLIENT_CDU	0
#define ILT_CLIENT_QM	1
#define ILT_CLIENT_SRC	2
#define ILT_CLIENT_TM	3
};
603 
/****************************************************************************
* SRC configuration
****************************************************************************/
/* one 64-byte searcher (SRC) table entry */
struct src_ent {
	uint8_t opaque[56];	/* opaque connection data */
	uint64_t next;		/* link to the next entry */
};
611 
/****************************************************************************
* Parity configuration
****************************************************************************/
/* Build one ecore_blocks_parity_data[] entry for a block whose parity
 * registers follow the <BLOCK>_REG_<BLOCK>_PRTY_{MASK,STS_CLR} naming.
 */
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK, \
	block##_REG_##block##_PRTY_STS_CLR, \
	en_mask, {m1, m1h, m2, m3}, #block \
}

/* Same, for blocks with a numbered register set (suffix _0) */
#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_0, \
	block##_REG_##block##_PRTY_STS_CLR_0, \
	en_mask, {m1, m1h, m2, m3}, #block"_0" \
}

/* Same, for blocks with a numbered register set (suffix _1) */
#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_1, \
	block##_REG_##block##_PRTY_STS_CLR_1, \
	en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
635 
/* Per-block parity register table, consumed by the enable/disable/clear
 * helpers below.  The per-chip reg_mask entries select which bits are
 * valid on each chip family (a zero mask means "not present on this chip").
 */
static const struct {
	uint32_t mask_addr;	/* PRTY_MASK register of the block */
	uint32_t sts_clr_addr;	/* PRTY_STS_CLR (read-to-clear status) register */
	uint32_t en_mask;		/* Mask to enable parity attentions */
	struct {
		uint32_t e1;		/* 57710 */
		uint32_t e1h;	/* 57711 */
		uint32_t e2;		/* 57712 */
		uint32_t e3;		/* 578xx */
	} reg_mask;		/* Register mask (all valid bits) */
	char name[8];		/* Block's longest name is 7 characters long
				 * (name + suffix)
				 */
} ecore_blocks_parity_data[] = {
	/* bit 19 masked */
	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
	/* bit 5,18,20-31 */
	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
	/* bit 5 */
	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */

	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
	 * want to handle "system kill" flow at the moment.
	 */
	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
			0x7ffffff),
	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
		{0xf, 0xf, 0xf, 0xf}, "UPB"},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
		{0xf, 0xf, 0xf, 0xf}, "XPB"},
	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};
713 
714 
/* [28] MCP Latched rom_parity
 * [29] MCP Latched ump_rx_parity
 * [30] MCP Latched ump_tx_parity
 * [31] MCP Latched scpad_parity
 */
/* MCP parity bits without the latched scpad parity bit */
#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\
	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)

/* all MCP parity bits, including the latched scpad parity bit */
#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
728 
/* Below registers control the MCP parity attention output. When
 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
 * enabled, when cleared - disabled.
 */
static const struct {
	uint32_t addr;	/* AEU enable register */
	uint32_t bits;	/* MCP parity bits applicable to that register */
} mcp_attn_ctl_regs[] = {
	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};
750 
751 static inline void ecore_set_mcp_parity(struct bxe_softc *sc, uint8_t enable)
752 {
753 	int i;
754 	uint32_t reg_val;
755 
756 	for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
757 		reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr);
758 
759 		if (enable)
760 			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */
761 		else
762 			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */
763 
764 		REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val);
765 	}
766 }
767 
768 static inline uint32_t ecore_parity_reg_mask(struct bxe_softc *sc, int idx)
769 {
770 	if (CHIP_IS_E1(sc))
771 		return ecore_blocks_parity_data[idx].reg_mask.e1;
772 	else if (CHIP_IS_E1H(sc))
773 		return ecore_blocks_parity_data[idx].reg_mask.e1h;
774 	else if (CHIP_IS_E2(sc))
775 		return ecore_blocks_parity_data[idx].reg_mask.e2;
776 	else /* CHIP_IS_E3 */
777 		return ecore_blocks_parity_data[idx].reg_mask.e3;
778 }
779 
780 static inline void ecore_disable_blocks_parity(struct bxe_softc *sc)
781 {
782 	int i;
783 
784 	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
785 		uint32_t dis_mask = ecore_parity_reg_mask(sc, i);
786 
787 		if (dis_mask) {
788 			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
789 			       dis_mask);
790 			ECORE_MSG(sc, "Setting parity mask "
791 						 "for %s to\t\t0x%x\n",
792 				    ecore_blocks_parity_data[i].name, dis_mask);
793 		}
794 	}
795 
796 	/* Disable MCP parity attentions */
797 	ecore_set_mcp_parity(sc, FALSE);
798 }
799 
/**
 * Clear the parity error status registers.
 */
static inline void ecore_clear_blocks_parity(struct bxe_softc *sc)
{
	int i;
	/* the four latched MCP parity attention bits */
	uint32_t reg_val, mcp_aeu_bits =
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;

	/* Clear SEM_FAST parities */
	REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);

	/* read each block's STS_CLR register (read clears it) and report
	 * any parity errors that were latched
	 */
	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);

		if (reg_mask) {
			reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
					 sts_clr_addr);
			if (reg_val & reg_mask)
				ECORE_MSG(sc,
					   "Parity errors in %s: 0x%x\n",
					   ecore_blocks_parity_data[i].name,
					   reg_val & reg_mask);
		}
	}

	/* Check if there were parity attentions in MCP */
	reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
	if (reg_val & mcp_aeu_bits)
		ECORE_MSG(sc, "Parity error in MCP: 0x%x\n",
			   reg_val & mcp_aeu_bits);

	/* Clear parity attentions in MCP:
	 * [7]  clears Latched rom_parity
	 * [8]  clears Latched ump_rx_parity
	 * [9]  clears Latched ump_tx_parity
	 * [10] clears Latched scpad_parity (both ports)
	 */
	REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
}
846 
847 static inline void ecore_enable_blocks_parity(struct bxe_softc *sc)
848 {
849 	int i;
850 
851 	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
852 		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);
853 
854 		if (reg_mask)
855 			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
856 				ecore_blocks_parity_data[i].en_mask & reg_mask);
857 	}
858 
859 	/* Enable MCP parity attentions */
860 	ecore_set_mcp_parity(sc, TRUE);
861 }
862 
863 
864 #endif /* ECORE_INIT_H */
865 
866