xref: /titanic_50/usr/src/uts/common/io/nxge/nxge_ipp.c (revision cd3e933325e68e23516a196a8fea7f49b1e497c3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <nxge_impl.h>
29 #include <nxge_ipp.h>
30 
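/*
 * Upper bound on the number of polls of the IPP DFIFO read/write
 * pointers while waiting for them to converge (i.e. for the FIFO
 * to drain) before the operation is considered to have stalled.
 */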
31 #define	NXGE_IPP_FIFO_SYNC_TRY_COUNT 100
32 
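/*
 * nxge_ipp_init
 *
 * Bring the port's IPP block to a known state: scrub the DFIFO SRAM so
 * its ECC/parity bits are initialized, clear the latched status bits and
 * hardware counters, soft-reset the block, and then program the
 * interrupt configuration, port configuration and maximum packet size.
 */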
33 /* ARGSUSED */
34 nxge_status_t
35 nxge_ipp_init(p_nxge_t nxgep)
36 {
37 	uint8_t portn;
38 	uint32_t config;
39 	npi_handle_t handle;
40 	uint32_t pkt_size;
41 	ipp_status_t istatus;
42 	npi_status_t rs = NPI_SUCCESS;
43 	uint64_t val;
44 	uint32_t d0, d1, d2, d3, d4;
45 	int i;
46 	uint32_t dfifo_entries;
47 
48 	handle = nxgep->npi_handle;
49 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
50 
51 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));
52 
53 	/* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
54 	if (nxgep->niu_type == N2_NIU) {
55 		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
56 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
57 		if (portn < 2)
58 			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
59 		else
60 			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
61 	} else {
62 		goto fail;
63 	}
64 
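	/*
	 * Write zeros to every DFIFO entry and read each entry back so
	 * the SRAM ECC and parity bits are generated for known data.
	 */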
65 	for (i = 0; i < dfifo_entries; i++) {
66 		if ((rs = npi_ipp_write_dfifo(handle,
67 		    portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
68 			goto fail;
69 		if ((rs = npi_ipp_read_dfifo(handle, portn,
70 		    i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
71 			goto fail;
72 	}
73 
74 	/* Clear PFIFO DFIFO status bits */
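	/* (the latched bits are cleared by reading the status register) */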
75 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
76 		goto fail;
77 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
78 		goto fail;
79 
80 	/*
81 	 * Soft reset to make sure we bring the FIFO pointers back to the
82 	 * original initial position.
83 	 */
84 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
85 		goto fail;
86 
87 	/* Clear the ECC, checksum and discard counters (they clear on read) */
88 	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
89 	IPP_REG_RD(nxgep->npi_handle, portn, IPP_BAD_CKSUM_ERR_CNT_REG, &val);
90 	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);
91 
92 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
93 		goto fail;
94 
95 	/* Configure IPP port */
96 	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
97 	    != NPI_SUCCESS)
98 		goto fail;
99 	nxgep->ipp.iconfig = ICFG_IPP_ALL;
100 
101 	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
102 	    CFG_IPP_TCP_UDP_CKSUM;
103 	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
104 		goto fail;
105 	nxgep->ipp.config = config;
106 
107 	/* Set max packet size */
108 	pkt_size = IPP_MAX_PKT_SIZE;
109 	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
110 	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
111 		goto fail;
112 	nxgep->ipp.max_pkt_size = pkt_size;
113 
114 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));
115 
116 	return (NXGE_OK);
117 fail:
118 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
119 	    "nxge_ipp_init: Failed to initialize IPP Port #%d\n",
120 	    portn));
121 	return (NXGE_ERROR | rs);
122 }
123 
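/*
 * nxge_ipp_disable
 *
 * Quiesce the port: stop the RxMAC, wait for the DFIFO read and write
 * pointers to converge (the FIFO has drained), then disable and
 * soft-reset the IPP block.
 */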
124 /* ARGSUSED */
125 nxge_status_t
126 nxge_ipp_disable(p_nxge_t nxgep)
127 {
128 	uint8_t portn;
129 	uint32_t config;
130 	npi_handle_t handle;
131 	npi_status_t rs = NPI_SUCCESS;
132 	uint16_t wr_ptr, rd_ptr;
133 	uint32_t try_count;
134 
135 	handle = nxgep->npi_handle;
136 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
137 
138 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
139 	(void) nxge_rx_mac_disable(nxgep);
140 
141 	/*
142 	 * Wait until the IPP DFIFO read and write pointers are equal
143 	 */
144 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
145 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
146 	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
147 
148 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
149 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
150 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
151 		try_count--;
152 	}
153 
154 	if (try_count == 0) {
155 		if ((rd_ptr != 0) && (wr_ptr != 1)) {
156 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
157 			    " nxge_ipp_disable: port%d failed"
158 			    " rd_fifo != wr_fifo", portn));
159 			goto fail;
160 		}
161 	}
162 	/* disable the IPP */
163 	config = nxgep->ipp.config;
164 	if ((rs = npi_ipp_config(handle, DISABLE,
165 	    portn, config)) != NPI_SUCCESS)
166 		goto fail;
167 
168 	/* IPP soft reset */
169 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
170 		goto fail;
171 
172 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
173 	return (NXGE_OK);
174 fail:
175 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
176 	    "nxge_ipp_disable: Failed to disable IPP Port #%d\n", portn));
177 	return (NXGE_ERROR | rs);
178 }
179 
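/*
 * nxge_ipp_reset
 *
 * Disable the IPP, wait for the DFIFO to drain, soft-reset the IPP and
 * the port's ZCP control FIFO, and clear any injected-error source left
 * in IPP_ECC_CTRL.
 */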
180 /* ARGSUSED */
181 nxge_status_t
182 nxge_ipp_reset(p_nxge_t nxgep)
183 {
184 	uint8_t portn;
185 	uint32_t config;
186 	npi_handle_t handle;
187 	npi_status_t rs = NPI_SUCCESS;
188 	uint16_t wr_ptr, rd_ptr;
189 	uint32_t try_count;
190 
191 	handle = nxgep->npi_handle;
192 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
193 
194 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));
195 
196 	/* disable the IPP */
197 	config = nxgep->ipp.config;
198 	if ((rs = npi_ipp_config(handle, DISABLE,
199 	    portn, config)) != NPI_SUCCESS)
200 		goto fail;
201 
202 	/*
203 	 * Wait until the IPP DFIFO read and write pointers are equal
204 	 */
205 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
206 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
207 	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
208 
209 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
210 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
211 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
212 		try_count--;
213 	}
214 
215 	if (try_count == 0) {
216 		if ((rd_ptr != 0) && (wr_ptr != 1)) {
217 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
218 			    " nxge_ipp_reset: port%d failed"
219 			    " rd_fifo != wr_fifo", portn));
220 			goto fail;
221 		}
222 	}
223 
224 	/* IPP soft reset */
225 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
226 		goto fail;
227 	}
228 
229 	/* Reset the ZCP control FIFO (CFIFO) for this port */
230 	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
231 		goto fail;
232 
233 	/*
234 	 * Making sure that error source is cleared if this is an injected
235 	 * error.
236 	 */
237 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
238 
239 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
240 	return (NXGE_OK);
241 fail:
242 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
243 	    "nxge_ipp_reset: Failed to reset IPP Port #%d\n",
244 	    portn));
245 	return (NXGE_ERROR | rs);
246 }
247 
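/*
 * nxge_ipp_enable
 *
 * Re-enable the IPP with its standard configuration (DFIFO ECC
 * correction, bad-CRC drop, TCP/UDP checksumming) and restore the
 * maximum packet size.
 */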
248 /* ARGSUSED */
249 nxge_status_t
250 nxge_ipp_enable(p_nxge_t nxgep)
251 {
252 	uint8_t portn;
253 	uint32_t config;
254 	npi_handle_t handle;
255 	uint32_t pkt_size;
256 	npi_status_t rs = NPI_SUCCESS;
257 
258 	handle = nxgep->npi_handle;
259 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
260 
261 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));
262 
263 	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
264 	    CFG_IPP_TCP_UDP_CKSUM;
265 	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
266 		goto fail;
267 	nxgep->ipp.config = config;
268 
269 	/* Set max packet size */
270 	pkt_size = IPP_MAX_PKT_SIZE;
271 	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
272 	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
273 		goto fail;
274 	nxgep->ipp.max_pkt_size = pkt_size;
275 
276 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
277 	return (NXGE_OK);
278 fail:
279 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
280 	    "nxge_ipp_enable: Failed to enable IPP Port #%d\n", portn));
281 	return (NXGE_ERROR | rs);
282 }
283 
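/*
 * nxge_ipp_drain
 *
 * Wait for the IPP DFIFO read and write pointers to converge, which
 * indicates that any packets in flight have drained out of the FIFO.
 */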
284 /* ARGSUSED */
285 nxge_status_t
286 nxge_ipp_drain(p_nxge_t nxgep)
287 {
288 	uint8_t portn;
289 	npi_handle_t handle;
290 	npi_status_t rs = NPI_SUCCESS;
291 	uint16_t wr_ptr, rd_ptr;
292 	uint32_t try_count;
293 
294 	handle = nxgep->npi_handle;
295 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
296 
297 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_drain: port%d", portn));
298 
299 	/*
300 	 * Wait until the IPP DFIFO read and write pointers are equal
301 	 */
302 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
303 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
304 	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
305 
306 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
307 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
308 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
309 		try_count--;
310 	}
311 
312 	if (try_count == 0) {
313 		if ((rd_ptr != 0) && (wr_ptr != 1)) {
314 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
315 			    " nxge_ipp_drain: port%d failed"
316 			    " rd_fifo != wr_fifo", portn));
317 			goto fail;
318 		}
319 	}
320 
321 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_drain: port%d", portn));
322 	return (NXGE_OK);
323 fail:
324 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_ipp_drain: "
325 	    "Failed to drain IPP Port #%d\n", portn));
326 	return (NXGE_ERROR | rs);
327 }
328 
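/*
 * nxge_ipp_handle_sys_errors
 *
 * Decode the IPP status register for this port, update the IPP kstat
 * error counters, post FMA ereports for hardware faults, and run the
 * fatal Rx port recovery when a fatal condition is seen.
 */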
329 /* ARGSUSED */
330 nxge_status_t
331 nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
332 {
333 	npi_handle_t handle;
334 	npi_status_t rs = NPI_SUCCESS;
335 	p_nxge_ipp_stats_t statsp;
336 	ipp_status_t istatus;
337 	uint8_t portn;
338 	p_ipp_errlog_t errlogp;
339 	boolean_t rxport_fatal = B_FALSE;
340 	nxge_status_t status = NXGE_OK;
341 	uint8_t cnt8;
342 	uint16_t cnt16;
343 
344 	handle = nxgep->npi_handle;
345 	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
346 	portn = nxgep->mac.portnum;
347 
348 	errlogp = (p_ipp_errlog_t)&statsp->errlog;
349 
350 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
351 		return (NXGE_ERROR | rs);
352 
353 	if (istatus.value == 0) {
354 		/*
355 		 * The error is not initiated from this port, so just exit.
356 		 */
357 		return (NXGE_OK);
358 	}
359 
360 	if (istatus.bits.w0.dfifo_missed_sop) {
361 		statsp->sop_miss++;
362 		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
363 		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
364 			return (NXGE_ERROR | rs);
365 		if ((rs = npi_ipp_get_state_mach(handle, portn,
366 		    &errlogp->state_mach)) != NPI_SUCCESS)
367 			return (NXGE_ERROR | rs);
368 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
369 		    NXGE_FM_EREPORT_IPP_SOP_MISS);
370 		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
371 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
372 			    "nxge_ipp_handle_sys_errors: fatal error: sop_miss\n"));
373 		rxport_fatal = B_TRUE;
374 	}
375 	if (istatus.bits.w0.dfifo_missed_eop) {
376 		statsp->eop_miss++;
377 		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
378 		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
379 			return (NXGE_ERROR | rs);
380 		if ((rs = npi_ipp_get_state_mach(handle, portn,
381 		    &errlogp->state_mach)) != NPI_SUCCESS)
382 			return (NXGE_ERROR | rs);
383 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
384 		    NXGE_FM_EREPORT_IPP_EOP_MISS);
385 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
386 		    "nxge_ipp_handle_sys_errors: fatal error: eop_miss\n"));
387 		rxport_fatal = B_TRUE;
388 	}
389 	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
390 		boolean_t ue_ecc_valid;
391 
392 		if ((status = nxge_ipp_eccue_valid_check(nxgep,
393 		    &ue_ecc_valid)) != NXGE_OK)
394 			return (status);
395 
396 		if (ue_ecc_valid) {
397 			statsp->dfifo_ue++;
398 			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
399 			    &errlogp->ecc_syndrome)) != NPI_SUCCESS)
400 				return (NXGE_ERROR | rs);
401 			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
402 			    NXGE_FM_EREPORT_IPP_DFIFO_UE);
403 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
404 			    "nxge_ipp_handle_sys_errors: fatal error: dfifo_ue\n"));
405 			rxport_fatal = B_TRUE;
406 		}
407 	}
408 	if (istatus.bits.w0.pre_fifo_perr) {
409 		statsp->pfifo_perr++;
410 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
411 		    NXGE_FM_EREPORT_IPP_PFIFO_PERR);
412 		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
413 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
414 			    "nxge_ipp_handle_sys_errors: "
415 			    "fatal error: pre_fifo_perr\n"));
416 		rxport_fatal = B_TRUE;
417 	}
418 	if (istatus.bits.w0.pre_fifo_overrun) {
419 		statsp->pfifo_over++;
420 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
421 		    NXGE_FM_EREPORT_IPP_PFIFO_OVER);
422 		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
423 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
424 			    "nxge_ipp_handle_sys_errors: "
425 			    "fatal error: pfifo_over\n"));
426 		rxport_fatal = B_TRUE;
427 	}
428 	if (istatus.bits.w0.pre_fifo_underrun) {
429 		statsp->pfifo_und++;
430 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
431 		    NXGE_FM_EREPORT_IPP_PFIFO_UND);
432 		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
433 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
434 			    "nxge_ipp_handle_sys_errors: "
435 			    "fatal error: pfifo_und\n"));
436 		rxport_fatal = B_TRUE;
437 	}
438 	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
439 		/*
440 		 * Do not send FMA ereport or log error message
441 		 * in /var/adm/messages because this error does not
442 		 * indicate a HW failure.
443 		 *
444 		 * Clear bit BAD_CS_MX of register IPP_INT_STAT
445 		 * by reading register IPP_BAD_CS_CNT
446 		 */
447 		(void) npi_ipp_get_cs_err_count(handle, portn, &cnt16);
448 		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
449 	}
450 	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
451 		/*
452 		 * Do not send FMA ereport or log error message
453 		 * in /var/adm/messages because this error does not
454 		 * indicate a HW failure.
455 		 *
456 		 * Clear bit PKT_DIS_MX of register IPP_INT_STAT
457 		 * by reading register IPP_PKT_DIS
458 		 */
459 		(void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16);
460 		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
461 	}
462 	if (istatus.bits.w0.ecc_err_cnt_ovfl) {
463 		/*
464 		 * Clear bit ECC_ERR_MAX of register IPP_INI_STAT
465 		 * by reading register IPP_ECC
466 		 */
467 		(void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8);
468 		statsp->ecc_err_cnt += IPP_ECC_CNT_MASK;
469 		/*
470 		 * A defect in Neptune port2's IPP module could generate
471 		 * many fake but harmless ECC errors under stress and cause
472 		 * the ecc-error-counter register IPP_ECC to reach its
473 		 * maximum value in a few seconds. To avoid false alarm, do
474 		 * not report the error if it is port2.
475 		 */
476 		if (portn != 2) {
477 			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
478 			    NXGE_FM_EREPORT_IPP_ECC_ERR_MAX);
479 			if (statsp->ecc_err_cnt < (IPP_MAX_ERR_SHOW *
480 			    IPP_ECC_CNT_MASK)) {
481 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
482 				    "nxge_ipp_handle_sys_errors: pkt_ecc_err_max\n"));
483 			}
484 		}
485 	}
486 	/*
487 	 * Making sure that error source is cleared if this is an injected
488 	 * error.
489 	 */
490 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
491 
492 	if (rxport_fatal) {
493 		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
494 		    " nxge_ipp_handle_sys_errors:"
495 		    " fatal error on Port #%d\n", portn));
496 		status = nxge_ipp_fatal_err_recover(nxgep);
497 		if (status == NXGE_OK) {
498 			FM_SERVICE_RESTORED(nxgep);
499 		}
500 	}
501 	return (status);
502 }
503 
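/*
 * nxge_ipp_inject_err
 *
 * Error-injection support for FMA testing: force the requested error by
 * setting the matching bit in IPP_ECC_CTRL or IPP_INT_STATUS, or by
 * saturating the relevant error counter register.
 */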
504 /* ARGSUSED */
505 void
506 nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
507 {
508 	ipp_status_t ipps;
509 	ipp_ecc_ctrl_t ecc_ctrl;
510 	uint8_t portn = nxgep->mac.portnum;
511 
512 	switch (err_id) {
513 	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
514 		ecc_ctrl.value = 0;
515 		ecc_ctrl.bits.w0.cor_dbl = 1;
516 		ecc_ctrl.bits.w0.cor_1 = 1;
517 		ecc_ctrl.bits.w0.cor_lst = 1;
518 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
519 		    (unsigned long long) ecc_ctrl.value);
520 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
521 		    ecc_ctrl.value);
522 		break;
523 
524 	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
525 		ecc_ctrl.value = 0;
526 		ecc_ctrl.bits.w0.cor_sng = 1;
527 		ecc_ctrl.bits.w0.cor_1 = 1;
528 		ecc_ctrl.bits.w0.cor_snd = 1;
529 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
530 		    (unsigned long long) ecc_ctrl.value);
531 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
532 		    ecc_ctrl.value);
533 		break;
534 
535 	case NXGE_FM_EREPORT_IPP_EOP_MISS:
536 	case NXGE_FM_EREPORT_IPP_SOP_MISS:
537 	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
538 	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
539 	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
540 	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
541 	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
542 	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
543 	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
544 		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
545 		    &ipps.value);
546 		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
547 			ipps.bits.w0.dfifo_missed_eop = 1;
548 		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
549 			ipps.bits.w0.dfifo_missed_sop = 1;
550 		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
551 			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
552 		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
553 			ipps.bits.w0.dfifo_corr_ecc_err = 1;
554 		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
555 			ipps.bits.w0.pre_fifo_perr = 1;
556 		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX) {
557 			/*
558 			 * Fill register IPP_ECC with max ECC-error-
559 			 * counter value (0xff) to set the ECC_ERR_MAX bit
560 			 * of the IPP_INT_STAT register and trigger an
561 			 * FMA ereport.
562 			 */
563 			IPP_REG_WR(nxgep->npi_handle, portn,
564 			    IPP_ECC_ERR_COUNTER_REG, IPP_ECC_CNT_MASK);
565 		} else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
566 			ipps.bits.w0.pre_fifo_overrun = 1;
567 		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
568 			ipps.bits.w0.pre_fifo_underrun = 1;
569 		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX) {
570 			/*
571 			 * Fill IPP_BAD_CS_CNT with max bad-checksum-counter
572 			 * value (0x3fff) to set the BAD_CS_MX bit of
573 			 * IPP_INT_STAT and trigger an FMA ereport.
574 			 */
575 			IPP_REG_WR(nxgep->npi_handle, portn,
576 			    IPP_BAD_CKSUM_ERR_CNT_REG, IPP_BAD_CS_CNT_MASK);
577 		} else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX) {
578 			/*
579 			 * Fill IPP_PKT_DIS with max packet-discard-counter
580 			 * value (0x3fff) to set the PKT_DIS_MX bit of
581 			 * IPP_INT_STAT and trigger an FMA ereport.
582 			 */
583 			IPP_REG_WR(nxgep->npi_handle, portn,
584 			    IPP_DISCARD_PKT_CNT_REG, IPP_PKT_DIS_CNT_MASK);
585 		}
586 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
587 		    (unsigned long long) ipps.value);
588 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
589 		    ipps.value);
590 		break;
591 	}
592 }
593 
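/*
 * nxge_ipp_fatal_err_recover
 *
 * Recover the Rx port from a fatal IPP error: disable the RxMAC, reset
 * the ZCP CFIFO, wait for the DFIFO to drain, scrub the DFIFO SRAM,
 * clear the status bits, reset the IPP, and then reset, reinitialize
 * and re-enable the RxMAC.
 */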
594 /* ARGSUSED */
595 nxge_status_t
596 nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
597 {
598 	npi_handle_t handle;
599 	npi_status_t rs = NPI_SUCCESS;
600 	nxge_status_t status = NXGE_OK;
601 	uint8_t portn;
602 	uint16_t wr_ptr;
603 	uint16_t rd_ptr;
604 	uint32_t try_count;
605 	uint32_t dfifo_entries;
606 	ipp_status_t istatus;
607 	uint32_t d0, d1, d2, d3, d4;
608 	int i;
609 
610 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
611 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
612 	    "Recovering from RxPort error..."));
613 
614 	handle = nxgep->npi_handle;
615 	portn = nxgep->mac.portnum;
616 
617 	/*
618 	 * Making sure that error source is cleared if this is an injected
619 	 * error.
620 	 */
621 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
622 
623 	/* Disable RxMAC */
624 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
625 		goto fail;
626 
627 	/* When recovering from IPP, RxDMA channel resets are not necessary */
628 	/* Reset ZCP CFIFO */
629 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
630 	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
631 		goto fail;
632 
633 	/*
634 	 * Wait until the IPP DFIFO read and write pointers are equal
635 	 */
636 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
637 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
638 	try_count = 512;
639 
640 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
641 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
642 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
643 		try_count--;
644 	}
645 
646 	if (try_count == 0) {
647 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
648 		    " nxge_ipp_fatal_err_recover: port%d IPP stalled..."
649 		    " rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
650 		    portn, rd_ptr, wr_ptr));
651 		/*
652 		 * This means the fatal error occurred on the first line of the
653 		 * fifo. In this case, just reset the IPP without draining the
654 		 * PFIFO.
655 		 */
656 	}
657 
658 	if (nxgep->niu_type == N2_NIU) {
659 		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
660 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
661 		if (portn < 2)
662 			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
663 		else
664 			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
665 	} else {
666 		goto fail;
667 	}
668 
669 	/* Clean up DFIFO SRAM entries */
670 	for (i = 0; i < dfifo_entries; i++) {
671 		if ((rs = npi_ipp_write_dfifo(handle, portn,
672 		    i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
673 			goto fail;
674 		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
675 		    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
676 			goto fail;
677 	}
678 
679 	/* Clear PFIFO DFIFO status bits */
680 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
681 		goto fail;
682 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
683 		goto fail;
684 
685 	/* Reset IPP */
686 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
687 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
688 		goto fail;
689 
690 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
691 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
692 		goto fail;
693 
694 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
695 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
696 		goto fail;
697 
698 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
699 	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
700 		goto fail;
701 
702 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
703 	    "Recovery successful, RxPort restored"));
704 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));
705 
706 	return (NXGE_OK);
707 fail:
708 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
709 	return (status | rs);
710 }
711 
712 /* ARGSUSED */
713 /*
714  *    A hardware bug may cause fake ECCUEs (ECC uncorrectable errors).
715  * This function checks whether an ECCUE is real (valid) or not.  It is
716  * not real if rd_ptr == wr_ptr.
717  *    The hardware module that has the bug is used not only by the IPP
718  * FIFO but also by the ZCP FIFO; therefore this function is also
719  * called by nxge_zcp_handle_sys_errors to validate the ZCP FIFO
720  * error.
721  */
722 nxge_status_t
723 nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
724 {
725 	npi_handle_t handle;
726 	npi_status_t rs = NPI_SUCCESS;
727 	uint8_t portn;
728 	uint16_t rd_ptr;
729 	uint16_t wr_ptr;
730 	uint16_t curr_rd_ptr;
731 	uint16_t curr_wr_ptr;
732 	uint32_t stall_cnt;
733 	uint32_t d0, d1, d2, d3, d4;
734 
735 	handle = nxgep->npi_handle;
736 	portn = nxgep->mac.portnum;
737 	*valid = B_TRUE;
738 
739 	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
740 	    != NPI_SUCCESS)
741 		goto fail;
742 	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
743 	    != NPI_SUCCESS)
744 		goto fail;
745 
746 	if (rd_ptr == wr_ptr) {
747 		*valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */
748 	} else {
749 		stall_cnt = 0;
750 		/*
751 		 * Check whether the two pointers are moving.  The ECCUE is
752 		 * invalid if either pointer is moving, which indicates that
753 		 * the FIFO is still functional.
754 		 */
755 		while (stall_cnt < 16) {
756 			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
757 			    portn, &curr_rd_ptr)) != NPI_SUCCESS)
758 				goto fail;
759 			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
760 			    portn, &curr_wr_ptr)) != NPI_SUCCESS)
761 				goto fail;
762 
763 			if (rd_ptr == curr_rd_ptr && wr_ptr == curr_wr_ptr) {
764 				stall_cnt++;
765 			} else {
766 				*valid = B_FALSE;
767 				break;
768 			}
769 		}
770 
771 		if (*valid) {
772 			/*
773 			 * Further check to see if the ECCUE is valid. The
774 			 * error is real if the LSB of d4 is 1, which
775 			 * indicates that the data that has set the ECC
776 			 * error flag is the 16-byte internal control word.
777 			 */
778 			if ((rs = npi_ipp_read_dfifo(handle, portn, rd_ptr,
779 			    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
780 				goto fail;
781 			if ((d4 & 0x1) == 0)	/* Not the 1st line */
782 				*valid = B_FALSE;
783 		}
784 	}
785 	return (NXGE_OK);
786 fail:
787 	return (NXGE_ERROR | rs);
788 }
789