/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <nxge_impl.h>
#include <nxge_zcp.h>
#include <nxge_ipp.h>

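/*
 * nxge_zcp_init
 *
 * Bring the Zero Copy Processor (ZCP) for this port to a known state:
 * zero out the TT CFIFO entries, reset the port's CFIFO, clear any
 * previously injected ECC error source, clear the latched interrupt
 * status, and program the interrupt configuration for all ZCP error
 * sources.
 */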
nxge_status_t
nxge_zcp_init(p_nxge_t nxgep)
{
	uint8_t portn;
	npi_handle_t handle;
	zcp_iconfig_t istatus;
	npi_status_t rs = NPI_SUCCESS;
	int i;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else if (nxgep->niu_type == N2_NIU)
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
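
	/*
	 * Note: the CFIFO depth selection above assumes niu_type is
	 * NEPTUNE, NEPTUNE_2 or N2_NIU; any other value would leave
	 * cfifo_depth uninitialized below.
	 */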

	/* Clean up CFIFO */
	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
				portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
				portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	if (npi_zcp_rest_cfifo_port(handle, portn) != NPI_SUCCESS)
		goto fail;

	/*
	 * Make sure that the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

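	/*
	 * Clear any latched ZCP interrupt status, read it back, and then
	 * program the interrupt configuration for all ZCP error sources.
	 */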
	if ((rs = npi_zcp_clear_istatus(handle)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_iconfig(handle, INIT, ICFG_ZCP_ALL)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_init: port%d", portn));
	return (NXGE_OK);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"nxge_zcp_init: Failed to initialize ZCP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

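/*
 * nxge_zcp_handle_sys_errors
 *
 * Read the latched ZCP interrupt status and, for each error bit that is
 * set, bump the corresponding kstat and post an FMA ereport.  Buffer
 * overflow errors, and CFIFO ECC errors that the IPP ECC check confirms,
 * are treated as fatal to the receive port; after clearing the (possibly
 * injected) error source and the status register, fatal errors trigger
 * nxge_zcp_fatal_err_recover().
 */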
nxge_status_t
nxge_zcp_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_zcp_stats_t statsp;
	uint8_t portn;
	zcp_iconfig_t istatus;
	boolean_t rxport_fatal = B_FALSE;
	nxge_status_t status = NXGE_OK;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
	portn = nxgep->mac.portnum;

	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

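	/*
	 * Walk the individual error bits.  The counters below live in the
	 * per-port ZCP kstat structure (nxgep->statsp->zcp_stats).
	 */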
	if (istatus & ICFG_ZCP_RRFIFO_UNDERRUN) {
		statsp->rrfifo_underrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: rrfifo_underrun"));
	}

	if (istatus & ICFG_ZCP_RRFIFO_OVERRUN) {
		statsp->rrfifo_overrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: buf_rrfifo_overrun"));
	}

	if (istatus & ICFG_ZCP_RSPFIFO_UNCORR_ERR) {
		statsp->rspfifo_uncorr_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: rspfifo_uncorr_err"));
	}

	if (istatus & ICFG_ZCP_BUFFER_OVERFLOW) {
		statsp->buffer_overflow++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: buffer_overflow"));
		rxport_fatal = B_TRUE;
	}

	if (istatus & ICFG_ZCP_STAT_TBL_PERR) {
		statsp->stat_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: stat_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_DYN_TBL_PERR) {
		statsp->dyn_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: dyn_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_BUF_TBL_PERR) {
		statsp->buf_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: buf_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_TT_PROGRAM_ERR) {
		statsp->tt_program_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: tt_program_err"));
	}

	if (istatus & ICFG_ZCP_RSP_TT_INDEX_ERR) {
		statsp->rsp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: rsp_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_SLV_TT_INDEX_ERR) {
		statsp->slv_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: slv_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_TT_INDEX_ERR) {
		statsp->zcp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_zcp_err_evnts: tt_index_err"));
	}

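	/*
	 * The CFIFO ECC status bit is per port.  Before counting it as a
	 * fatal receive-port error, let the IPP module decide whether the
	 * uncorrectable ECC indication is valid for this port.
	 */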
	if (((portn == 0) && (istatus & ICFG_ZCP_CFIFO_ECC0)) ||
			((portn == 1) && (istatus & ICFG_ZCP_CFIFO_ECC1)) ||
			((portn == 2) && (istatus & ICFG_ZCP_CFIFO_ECC2)) ||
			((portn == 3) && (istatus & ICFG_ZCP_CFIFO_ECC3))) {
		boolean_t ue_ecc_valid;

		if ((status = nxge_ipp_eccue_valid_check(nxgep,
				&ue_ecc_valid)) != NXGE_OK)
			return (status);

		if (ue_ecc_valid) {
			statsp->cfifo_ecc++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
				NXGE_FM_EREPORT_ZCP_CFIFO_ECC);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				"nxge_zcp_err_evnts: port%d buf_cfifo_ecc",
				portn));
			rxport_fatal = B_TRUE;
		}
	}

	/*
	 * Make sure that the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	(void) npi_zcp_clear_istatus(handle);

	if (rxport_fatal) {
		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
			" nxge_zcp_handle_sys_errors:"
			" fatal error on Port #%d\n", portn));
		status = nxge_zcp_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
	return (status);
}

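/*
 * nxge_zcp_inject_err
 *
 * Error injection entry point used for FMA testing.  A CFIFO ECC error is
 * injected through the per-port ZCP_CFIFO_ECC_PORTn register; all other
 * error types are injected by setting the corresponding bit in
 * ZCP_INT_STAT_TEST_REG.
 */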
void
nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
	zcp_int_stat_reg_t zcps;
	uint8_t portn = nxgep->mac.portnum;
	zcp_ecc_ctrl_t ecc_ctrl;

	switch (err_id) {
	case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
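		/*
		 * Inject a CFIFO ECC error by programming the per-port
		 * ECC control register: cor_dbl and cor_lst set,
		 * cor_all clear.
		 */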
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_dbl = 1;
		ecc_ctrl.bits.w0.cor_lst = 1;
		ecc_ctrl.bits.w0.cor_all = 0;
		switch (portn) {
		case 0:
			cmn_err(CE_NOTE,
				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
				(unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
				ZCP_CFIFO_ECC_PORT0_REG,
				ecc_ctrl.value);
			break;
		case 1:
			cmn_err(CE_NOTE,
				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
				(unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
				ZCP_CFIFO_ECC_PORT1_REG,
				ecc_ctrl.value);
			break;
		case 2:
			cmn_err(CE_NOTE,
				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
				(unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
				ZCP_CFIFO_ECC_PORT2_REG,
				ecc_ctrl.value);
			break;
		case 3:
			cmn_err(CE_NOTE,
				"!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
				(unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
				ZCP_CFIFO_ECC_PORT3_REG,
				ecc_ctrl.value);
			break;
		}
		break;

	case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
	case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
	case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
	case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
	case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
	case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
		NXGE_REG_RD64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
			&zcps.value);
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN)
			zcps.bits.ldw.rrfifo_urun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR)
			zcps.bits.ldw.rspfifo_uc_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR)
			zcps.bits.ldw.stat_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR)
			zcps.bits.ldw.dyn_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR)
			zcps.bits.ldw.buf_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_CFIFO_ECC) {
			switch (portn) {
			case 0:
				zcps.bits.ldw.cfifo_ecc0 = 1;
				break;
			case 1:
				zcps.bits.ldw.cfifo_ecc1 = 1;
				break;
			case 2:
				zcps.bits.ldw.cfifo_ecc2 = 1;
				break;
			case 3:
				zcps.bits.ldw.cfifo_ecc3 = 1;
				break;
			}
		}

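		/*
		 * No break here: the grouped cases above fall through into
		 * the default case, which writes the accumulated bits to
		 * ZCP_INT_STAT_TEST_REG.
		 */
		/* FALLTHROUGH */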
	default:
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN)
			zcps.bits.ldw.rrfifo_orun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW)
			zcps.bits.ldw.buf_overflow = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR)
			zcps.bits.ldw.tt_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR)
			zcps.bits.ldw.rsp_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR)
			zcps.bits.ldw.slv_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR)
			zcps.bits.ldw.zcp_tt_index_err = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to ZCP_INT_STAT_TEST_REG\n",
			(unsigned long long) zcps.value);
		NXGE_REG_WR64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
			zcps.value);
		break;
	}
}

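/*
 * nxge_zcp_fatal_err_recover
 *
 * Recover the receive path after a fatal ZCP error: disable the RxMAC,
 * clear any injected error source, zero out the CFIFO, reset the ZCP
 * CFIFO and the IPP for this port, and finally reset, reinitialize and
 * re-enable the RxMAC.
 */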
nxge_status_t
nxge_zcp_fatal_err_recover(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	uint8_t portn;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;
	int i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"Recovering from RxPort error..."));

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;

	/* Disable RxMAC */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Make sure the error source is clear if this is an injected error */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	/* Clear up CFIFO */
	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else if (nxgep->niu_type == N2_NIU)
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;

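	/*
	 * Flush the CFIFO for this port by writing, and reading back,
	 * zeroed entries across its full depth (the same procedure used
	 * in nxge_zcp_init).
	 */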
	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
				portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
				portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	/* When recovering from ZCP, RxDMA channel resets are not necessary */
	/* Reset ZCP CFIFO */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset ZCP CFIFO...", portn));
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/* Reset IPP */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset IPP...", portn));
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset RxMAC...", portn));
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Initialize RxMAC...", portn));
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Enable RxMAC...", portn));
	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"Recovery Successful, RxPort Restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_zcp_fatal_err_recover"));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | rs);
}