xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_ipp.c (revision 66582b606a8194f7f3ba5b3a3a6dca5b0d346361)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <nxge_impl.h>
#include <nxge_ipp.h>

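/*
 * Maximum number of times to poll for the IPP DFIFO read and write
 * pointers to become equal before giving up.
 */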
#define	NXGE_IPP_FIFO_SYNC_TRY_COUNT 100

/* ARGSUSED */
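/*
 * nxge_ipp_init
 *	Initialize the IPP block for this port: zero the DFIFO SRAM to
 *	initialize its ECC and parity bits, clear the status bits,
 *	soft-reset the block, clear the error counters, and program the
 *	port configuration and maximum packet size.
 */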
nxge_status_t
nxge_ipp_init(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	uint32_t pkt_size;
	ipp_status_t istatus;
	npi_status_t rs = NPI_SUCCESS;
	uint64_t val;
	uint32_t d0, d1, d2, d3, d4;
	int i;
	uint32_t dfifo_entries;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));

	/* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
	if (nxgep->niu_type == N2_NIU) {
		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
		else
			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
	} else {
		goto fail;
	}

	for (i = 0; i < dfifo_entries; i++) {
		if ((rs = npi_ipp_write_dfifo(handle,
		    portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
			goto fail;
		if ((rs = npi_ipp_read_dfifo(handle, portn,
		    i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
			goto fail;
	}

	/* Clear PFIFO and DFIFO status bits */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Soft reset to make sure we bring the FIFO pointers back to the
	 * original initial position.
	 */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/* Clear the ECC, bad-checksum and discarded-packet counters */
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_BAD_CKSUM_ERR_CNT_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);

	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/* Configure IPP port */
	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
	    != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.iconfig = ICFG_IPP_ALL;

	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
	    CFG_IPP_TCP_UDP_CKSUM;
	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.config = config;

	/* Set max packet size */
	pkt_size = IPP_MAX_PKT_SIZE;
	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.max_pkt_size = pkt_size;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_init: Failed to initialize IPP Port #%d\n",
	    portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
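/*
 * nxge_ipp_disable
 *	Disable the IPP for this port: stop the RxMAC, wait for the DFIFO
 *	read and write pointers to become equal, then disable and
 *	soft-reset the IPP.
 */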
nxge_status_t
nxge_ipp_disable(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	uint16_t wr_ptr, rd_ptr;
	uint32_t try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
	(void) nxge_rx_mac_disable(nxgep);

	/*
	 * Wait until the IPP DFIFO read and write pointers are equal.
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_ipp_disable: port%d failed"
			    " rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}
	/* Disable the IPP */
	config = nxgep->ipp.config;
	if ((rs = npi_ipp_config(handle, DISABLE,
	    portn, config)) != NPI_SUCCESS)
		goto fail;

	/* IPP soft reset */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_disable: Failed to disable IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
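/*
 * nxge_ipp_reset
 *	Disable the IPP, wait for the DFIFO read and write pointers to
 *	become equal, then soft-reset the IPP and the ZCP control FIFO
 *	for this port.
 */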
nxge_status_t
nxge_ipp_reset(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	uint16_t wr_ptr, rd_ptr;
	uint32_t try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));

	/* Disable the IPP */
	config = nxgep->ipp.config;
	if ((rs = npi_ipp_config(handle, DISABLE,
	    portn, config)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Wait until the IPP DFIFO read and write pointers are equal.
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_ipp_reset: port%d failed"
			    " rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}

	/* IPP soft reset */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
		goto fail;
	}

	/* Reset the ZCP control FIFO */
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Make sure the error source is cleared in case this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_reset: Failed to reset IPP Port #%d\n",
	    portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
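/*
 * nxge_ipp_enable
 *	Enable the IPP for this port and program its configuration and
 *	maximum packet size.
 */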
nxge_status_t
nxge_ipp_enable(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	uint32_t pkt_size;
	npi_status_t rs = NPI_SUCCESS;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));

	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
	    CFG_IPP_TCP_UDP_CKSUM;
	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.config = config;

	/* Set max packet size */
	pkt_size = IPP_MAX_PKT_SIZE;
	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.max_pkt_size = pkt_size;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_enable: Failed to enable IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
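/*
 * nxge_ipp_drain
 *	Wait for the IPP DFIFO to drain, that is, for its read and write
 *	pointers to become equal.
 */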
nxge_status_t
nxge_ipp_drain(p_nxge_t nxgep)
{
	uint8_t portn;
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	uint16_t wr_ptr, rd_ptr;
	uint32_t try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_drain: port%d", portn));

	/*
	 * Wait until the IPP DFIFO read and write pointers are equal.
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_ipp_drain: port%d failed"
			    " rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_drain: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_ipp_drain: "
	    "Failed to drain IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
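/*
 * nxge_ipp_handle_sys_errors
 *	Read the IPP status register, update the error statistics, post
 *	FMA ereports for the error bits that are set, and run the fatal
 *	error recovery if a fatal RxPort error was detected.
 */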
nxge_status_t
nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_ipp_stats_t statsp;
	ipp_status_t istatus;
	uint8_t portn;
	p_ipp_errlog_t errlogp;
	boolean_t rxport_fatal = B_FALSE;
	nxge_status_t status = NXGE_OK;
	uint8_t cnt8;
	uint16_t cnt16;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
	portn = nxgep->mac.portnum;

	errlogp = (p_ipp_errlog_t)&statsp->errlog;

	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (istatus.value == 0) {
		/*
		 * The error is not initiated from this port, so just exit.
		 */
		return (NXGE_OK);
	}

	if (istatus.bits.w0.dfifo_missed_sop) {
		statsp->sop_miss++;
		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		if ((rs = npi_ipp_get_state_mach(handle, portn,
		    &errlogp->state_mach)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
		    NXGE_FM_EREPORT_IPP_SOP_MISS);
		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: fatal error: sop_miss\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.dfifo_missed_eop) {
		statsp->eop_miss++;
		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		if ((rs = npi_ipp_get_state_mach(handle, portn,
		    &errlogp->state_mach)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
		    NXGE_FM_EREPORT_IPP_EOP_MISS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_ipp_err_evnts: fatal error: eop_miss\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
		boolean_t ue_ecc_valid;

		if ((status = nxge_ipp_eccue_valid_check(nxgep,
		    &ue_ecc_valid)) != NXGE_OK)
			return (status);

		if (ue_ecc_valid) {
			statsp->dfifo_ue++;
			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
			    &errlogp->ecc_syndrome)) != NPI_SUCCESS)
				return (NXGE_ERROR | rs);
			NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
			    NXGE_FM_EREPORT_IPP_DFIFO_UE);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
			rxport_fatal = B_TRUE;
		}
	}
	if (istatus.bits.w0.pre_fifo_perr) {
		statsp->pfifo_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
		    NXGE_FM_EREPORT_IPP_PFIFO_PERR);
		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pre_fifo_perr\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.pre_fifo_overrun) {
		statsp->pfifo_over++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
		    NXGE_FM_EREPORT_IPP_PFIFO_OVER);
		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pfifo_over\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.pre_fifo_underrun) {
		statsp->pfifo_und++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
		    NXGE_FM_EREPORT_IPP_PFIFO_UND);
		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pfifo_und\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
		/*
		 * Do not send FMA ereport or log error message
		 * in /var/adm/messages because this error does not
		 * indicate a HW failure.
		 *
		 * Clear bit BAD_CS_MX of register IPP_INT_STAT
		 * by reading register IPP_BAD_CS_CNT
		 */
		(void) npi_ipp_get_cs_err_count(handle, portn, &cnt16);
		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
	}
	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
		/*
		 * Do not send FMA ereport or log error message
		 * in /var/adm/messages because this error does not
		 * indicate a HW failure.
		 *
		 * Clear bit PKT_DIS_MX of register IPP_INT_STAT
		 * by reading register IPP_PKT_DIS
		 */
		(void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16);
		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
	}
	if (istatus.bits.w0.ecc_err_cnt_ovfl) {
		/*
		 * Clear bit ECC_ERR_MAX of register IPP_INT_STAT
		 * by reading register IPP_ECC
		 */
		(void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8);
		statsp->ecc_err_cnt += IPP_ECC_CNT_MASK;
		/*
		 * A defect in Neptune port2's IPP module could generate
		 * many fake but harmless ECC errors under stress and cause
		 * the ecc-error-counter register IPP_ECC to reach its
		 * maximum value in a few seconds. To avoid false alarms, do
		 * not report the error if it is port2.
		 */
		if (portn != 2) {
			NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
			    NXGE_FM_EREPORT_IPP_ECC_ERR_MAX);
			if (statsp->ecc_err_cnt < (IPP_MAX_ERR_SHOW *
			    IPP_ECC_CNT_MASK)) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_ipp_err_evnts: pkt_ecc_err_max\n"));
			}
		}
	}
	/*
	 * Make sure the error source is cleared in case this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	if (rxport_fatal) {
		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
		    " nxge_ipp_handle_sys_errors:"
		    " fatal error on Port #%d\n", portn));
		status = nxge_ipp_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
	return (status);
}

/* ARGSUSED */
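/*
 * nxge_ipp_inject_err
 *	Inject an IPP error (err_id is an NXGE_FM_EREPORT_IPP_* code) by
 *	setting the corresponding bits in the IPP ECC-control, counter or
 *	interrupt-status registers, so that the error-handling path can
 *	be exercised.
 */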
void
nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
	ipp_status_t ipps;
	ipp_ecc_ctrl_t ecc_ctrl;
	uint8_t portn = nxgep->mac.portnum;

	switch (err_id) {
	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_dbl = 1;
		ecc_ctrl.bits.w0.cor_1 = 1;
		ecc_ctrl.bits.w0.cor_lst = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
		    (unsigned long long) ecc_ctrl.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
		    ecc_ctrl.value);
		break;

	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_sng = 1;
		ecc_ctrl.bits.w0.cor_1 = 1;
		ecc_ctrl.bits.w0.cor_snd = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
		    (unsigned long long) ecc_ctrl.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
		    ecc_ctrl.value);
		break;

	case NXGE_FM_EREPORT_IPP_EOP_MISS:
	case NXGE_FM_EREPORT_IPP_SOP_MISS:
	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
		    &ipps.value);
		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
			ipps.bits.w0.dfifo_missed_eop = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
			ipps.bits.w0.dfifo_missed_sop = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
			ipps.bits.w0.dfifo_corr_ecc_err = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
			ipps.bits.w0.pre_fifo_perr = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX) {
			/*
			 * Fill register IPP_ECC with max ECC-error-
			 * counter value (0xff) to set the ECC_ERR_MAX bit
			 * of the IPP_INT_STAT register and trigger an
			 * FMA ereport.
			 */
			IPP_REG_WR(nxgep->npi_handle, portn,
			    IPP_ECC_ERR_COUNTER_REG, IPP_ECC_CNT_MASK);
		} else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
			ipps.bits.w0.pre_fifo_overrun = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
			ipps.bits.w0.pre_fifo_underrun = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX) {
			/*
			 * Fill IPP_BAD_CS_CNT with max bad-checksum-counter
			 * value (0x3fff) to set the BAD_CS_MX bit of
			 * IPP_INT_STAT and trigger an FMA ereport.
			 */
			IPP_REG_WR(nxgep->npi_handle, portn,
			    IPP_BAD_CKSUM_ERR_CNT_REG, IPP_BAD_CS_CNT_MASK);
		} else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX) {
			/*
			 * Fill IPP_PKT_DIS with max packet-discard-counter
			 * value (0x3fff) to set the PKT_DIS_MX bit of
			 * IPP_INT_STAT and trigger an FMA ereport.
			 */
			IPP_REG_WR(nxgep->npi_handle, portn,
			    IPP_DISCARD_PKT_CNT_REG, IPP_PKT_DIS_CNT_MASK);
		}
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
		    (unsigned long long) ipps.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
		    ipps.value);
		break;
	}
}

/* ARGSUSED */
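/*
 * nxge_ipp_fatal_err_recover
 *	Recover from a fatal IPP error: disable the RxMAC, reset the ZCP
 *	CFIFO, wait for the DFIFO to drain, zero the DFIFO SRAM, reset the
 *	IPP and then reset, re-initialize and re-enable the RxMAC.
 */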
nxge_status_t
nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	uint8_t portn;
	uint16_t wr_ptr;
	uint16_t rd_ptr;
	uint32_t try_count;
	uint32_t dfifo_entries;
	ipp_status_t istatus;
	uint32_t d0, d1, d2, d3, d4;
	int i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;

	/*
	 * Make sure the error source is cleared in case this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	/* Disable RxMAC */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* When recovering from IPP, RxDMA channel resets are not necessary */
	/* Reset ZCP CFIFO */
	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Wait until the IPP DFIFO read and write pointers are equal.
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = 512;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_ipp_fatal_err_recover: port%d IPP stalled..."
		    " rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
		    portn, rd_ptr, wr_ptr));
		/*
		 * This means the fatal error occurred on the first line of the
		 * fifo. In this case, just reset the IPP without draining the
		 * PFIFO.
		 */
	}

	if (nxgep->niu_type == N2_NIU) {
		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
		else
			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
	} else {
		goto fail;
	}

	/* Clean up DFIFO SRAM entries */
	for (i = 0; i < dfifo_entries; i++) {
		if ((rs = npi_ipp_write_dfifo(handle, portn,
		    i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
			goto fail;
		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
		    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
			goto fail;
	}

	/* Clear PFIFO and DFIFO status bits */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/* Reset IPP */
	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery successful, RxPort restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | rs);
}

/* ARGSUSED */
/*
 * A hardware bug may cause fake ECCUEs (uncorrectable ECC errors).
 * This function checks whether an ECCUE is real (valid) or not: it is
 * not real if rd_ptr == wr_ptr.
 * The hardware module that has the bug is used not only by the IPP
 * FIFO but also by the ZCP FIFO; therefore this function is also
 * called by nxge_zcp_handle_sys_errors to validate a ZCP FIFO error.
 */
nxge_status_t
nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	uint8_t portn;
	uint16_t rd_ptr;
	uint16_t wr_ptr;
	uint16_t curr_rd_ptr;
	uint16_t curr_wr_ptr;
	uint32_t stall_cnt;
	uint32_t d0, d1, d2, d3, d4;

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;
	*valid = B_TRUE;

	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
	    != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
	    != NPI_SUCCESS)
		goto fail;

	if (rd_ptr == wr_ptr) {
		*valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */
	} else {
		stall_cnt = 0;
		/*
		 * Check whether the two pointers are moving. The ECCUE is
		 * invalid if either pointer is moving, which indicates that
		 * the FIFO is still functional.
		 */
		while (stall_cnt < 16) {
			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
			    portn, &curr_rd_ptr)) != NPI_SUCCESS)
				goto fail;
			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
			    portn, &curr_wr_ptr)) != NPI_SUCCESS)
				goto fail;

			if (rd_ptr == curr_rd_ptr && wr_ptr == curr_wr_ptr) {
				stall_cnt++;
			} else {
				*valid = B_FALSE;
				break;
			}
		}

		if (*valid) {
			/*
			 * Check further whether the ECCUE is valid: the
			 * error is real if the LSB of d4 is 1, which
			 * indicates that the data that set the ECC error
			 * flag is the 16-byte internal control word.
			 */
			if ((rs = npi_ipp_read_dfifo(handle, portn, rd_ptr,
			    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
				goto fail;
			if ((d4 & 0x1) == 0)	/* Not the 1st line */
				*valid = B_FALSE;
		}
	}
	return (NXGE_OK);
fail:
	return (NXGE_ERROR | rs);
}
787