xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_zcp.c (revision 03270635d68df6a0392fb8f4b7c04acad764648b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <nxge_impl.h>
#include <nxge_zcp.h>
#include <nxge_ipp.h>

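/*
 * nxge_zcp_init
 *
 *	Initialize the ZCP (Zero Copy Processor) for this port: zero out
 *	every CFIFO entry for the port, reset the port's CFIFO, clear any
 *	previously injected CFIFO ECC error source, then clear the ZCP
 *	interrupt status and set up the interrupt configuration for all
 *	ZCP error sources (ICFG_ZCP_ALL).
 *
 *	Returns NXGE_OK on success, or (NXGE_ERROR | NPI status) on failure.
 */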
nxge_status_t
nxge_zcp_init(p_nxge_t nxgep)
{
	uint8_t portn;
	npi_handle_t handle;
	zcp_iconfig_t istatus;
	npi_status_t rs = NPI_SUCCESS;
	int i;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	if (nxgep->niu_type == N2_NIU) {
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else {
		goto fail;
	}

	/* Clean up CFIFO */
	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
		    portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
		    portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	if (npi_zcp_rest_cfifo_port(handle, portn) != NPI_SUCCESS)
		goto fail;

	/*
	 * Make sure the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

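	/*
	 * Clear any pending ZCP interrupt status, read it back, then
	 * set up the interrupt configuration for all ZCP error sources.
	 */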
	if ((rs = npi_zcp_clear_istatus(handle)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_iconfig(handle, INIT, ICFG_ZCP_ALL)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_init: port%d", portn));
	return (NXGE_OK);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_zcp_init: Failed to initialize ZCP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

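/*
 * nxge_zcp_handle_sys_errors
 *
 *	Read the ZCP interrupt status and, for each error condition that is
 *	set, bump the corresponding kstat and post an FMA ereport.  A buffer
 *	overflow, or a CFIFO ECC error that the IPP confirms is a valid
 *	uncorrectable error, is treated as fatal to the receive port and
 *	triggers nxge_zcp_fatal_err_recover().
 */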
nxge_status_t
nxge_zcp_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_zcp_stats_t statsp;
	uint8_t portn;
	zcp_iconfig_t istatus;
	boolean_t rxport_fatal = B_FALSE;
	nxge_status_t status = NXGE_OK;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
	portn = nxgep->mac.portnum;

	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (istatus & ICFG_ZCP_RRFIFO_UNDERRUN) {
		statsp->rrfifo_underrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rrfifo_underrun"));
	}

	if (istatus & ICFG_ZCP_RRFIFO_OVERRUN) {
		statsp->rrfifo_overrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buf_rrfifo_overrun"));
	}

	if (istatus & ICFG_ZCP_RSPFIFO_UNCORR_ERR) {
		statsp->rspfifo_uncorr_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rspfifo_uncorr_err"));
	}

	if (istatus & ICFG_ZCP_BUFFER_OVERFLOW) {
		statsp->buffer_overflow++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buffer_overflow"));
		rxport_fatal = B_TRUE;
	}

	if (istatus & ICFG_ZCP_STAT_TBL_PERR) {
		statsp->stat_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: stat_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_DYN_TBL_PERR) {
		statsp->dyn_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: dyn_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_BUF_TBL_PERR) {
		statsp->buf_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buf_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_TT_PROGRAM_ERR) {
		statsp->tt_program_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: tt_program_err"));
	}

	if (istatus & ICFG_ZCP_RSP_TT_INDEX_ERR) {
		statsp->rsp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rsp_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_SLV_TT_INDEX_ERR) {
		statsp->slv_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: slv_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_TT_INDEX_ERR) {
		statsp->zcp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: tt_index_err"));
	}

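	/*
	 * A CFIFO ECC error on this port is treated as fatal only if
	 * nxge_ipp_eccue_valid_check() confirms that the uncorrectable
	 * ECC indication is valid.
	 */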
	if (((portn == 0) && (istatus & ICFG_ZCP_CFIFO_ECC0)) ||
	    ((portn == 1) && (istatus & ICFG_ZCP_CFIFO_ECC1)) ||
	    ((portn == 2) && (istatus & ICFG_ZCP_CFIFO_ECC2)) ||
	    ((portn == 3) && (istatus & ICFG_ZCP_CFIFO_ECC3))) {
		boolean_t ue_ecc_valid;

		if ((status = nxge_ipp_eccue_valid_check(nxgep,
		    &ue_ecc_valid)) != NXGE_OK)
			return (status);

		if (ue_ecc_valid) {
			statsp->cfifo_ecc++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			    NXGE_FM_EREPORT_ZCP_CFIFO_ECC);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_zcp_err_evnts: port%d buf_cfifo_ecc",
			    portn));
			rxport_fatal = B_TRUE;
		}
	}

	/*
	 * Make sure the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	(void) npi_zcp_clear_istatus(handle);

	if (rxport_fatal) {
		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
		    " nxge_zcp_handle_sys_errors:"
		    " fatal error on Port #%d\n", portn));
		status = nxge_zcp_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
	return (status);
}

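/*
 * nxge_zcp_inject_err
 *
 *	Inject a ZCP error for exercising the error handling path.  A CFIFO
 *	ECC error is injected through the port's ZCP_CFIFO_ECC_PORTn_REG
 *	ECC control register; all other errors are injected by setting the
 *	corresponding bit in ZCP_INT_STAT_TEST_REG.
 */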
void
nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
	zcp_int_stat_reg_t zcps;
	uint8_t portn = nxgep->mac.portnum;
	zcp_ecc_ctrl_t ecc_ctrl;

	switch (err_id) {
	case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_dbl = 1;
		ecc_ctrl.bits.w0.cor_lst = 1;
		ecc_ctrl.bits.w0.cor_all = 0;
		switch (portn) {
		case 0:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT0_REG,
			    ecc_ctrl.value);
			break;
		case 1:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT1_REG,
			    ecc_ctrl.value);
			break;
		case 2:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT2_REG,
			    ecc_ctrl.value);
			break;
		case 3:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT3_REG,
			    ecc_ctrl.value);
			break;
		}
		break;

	case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
	case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
	case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
	case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
	case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
	case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
		NXGE_REG_RD64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
		    &zcps.value);
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN)
			zcps.bits.ldw.rrfifo_urun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR)
			zcps.bits.ldw.rspfifo_uc_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR)
			zcps.bits.ldw.stat_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR)
			zcps.bits.ldw.dyn_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR)
			zcps.bits.ldw.buf_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_CFIFO_ECC) {
			switch (portn) {
			case 0:
				zcps.bits.ldw.cfifo_ecc0 = 1;
				break;
			case 1:
				zcps.bits.ldw.cfifo_ecc1 = 1;
				break;
			case 2:
				zcps.bits.ldw.cfifo_ecc2 = 1;
				break;
			case 3:
				zcps.bits.ldw.cfifo_ecc3 = 1;
				break;
			}
		}
		/* FALLTHROUGH */

	default:
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN)
			zcps.bits.ldw.rrfifo_orun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW)
			zcps.bits.ldw.buf_overflow = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR)
			zcps.bits.ldw.tt_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR)
			zcps.bits.ldw.rsp_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR)
			zcps.bits.ldw.slv_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR)
			zcps.bits.ldw.zcp_tt_index_err = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to ZCP_INT_STAT_TEST_REG\n",
		    zcps.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to ZCP_INT_STAT_TEST_REG\n",
		    zcps.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
		    zcps.value);
		break;
	}
}

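/*
 * nxge_zcp_fatal_err_recover
 *
 *	Recover the receive port from a fatal ZCP error: disable the RxMAC,
 *	clear any injected error source, zero out the CFIFO, reset the ZCP
 *	CFIFO and the IPP, then reset, re-initialize and re-enable the RxMAC.
 */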
nxge_status_t
nxge_zcp_fatal_err_recover(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	uint8_t portn;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;
	int i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;

	/* Disable RxMAC */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Make sure the error source is cleared if this is an injected error */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	/* Clean up CFIFO */
	if (nxgep->niu_type == N2_NIU) {
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else {
		goto fail;
	}

	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
		    portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
		    portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	/* When recovering from ZCP, RxDMA channel resets are not necessary */
	/* Reset ZCP CFIFO */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset ZCP CFIFO...", portn));
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/* Reset IPP */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset IPP...", portn));
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset RxMAC...", portn));
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Initialize RxMAC...", portn));
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Enable RxMAC...", portn));
	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_zcp_fatal_err_recover"));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | rs);
}