xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_ipp.c (revision da6c28aaf62fa55f0fdb8004aa40f88f23bf53f0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <nxge_impl.h>
29 #include <nxge_ipp.h>
30 
31 #define	NXGE_IPP_FIFO_SYNC_TRY_COUNT 100
32 
/* ARGSUSED */
/*
 * nxge_ipp_init
 *
 *	Initialize the IPP (Input Packet Processor) block for one port:
 *	scrub the DFIFO SRAM to establish valid ECC/parity, clear latched
 *	status and counters, soft-reset the block, then program interrupt
 *	and feature configuration and the maximum packet size.
 *
 * Arguments:
 *	nxgep	per-device soft state
 *
 * Returns:
 *	NXGE_OK on success, or NXGE_ERROR ORed with the failing NPI status.
 */
nxge_status_t
nxge_ipp_init(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	uint32_t pkt_size;
	ipp_status_t istatus;
	npi_status_t rs = NPI_SUCCESS;
	uint64_t val;
	uint32_t d0, d1, d2, d3, d4;
	int i;
	uint32_t dfifo_entries;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));

	/*
	 * Initialize ECC and parity in SRAM of DFIFO and PFIFO.  The DFIFO
	 * depth differs by chip: N2/NIU uses one size, while Neptune splits
	 * a larger FIFO between port pairs (0/1 vs. 2/3).
	 */
	if (nxgep->niu_type == N2_NIU) {
		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
		else
			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
	} else {
		/* Unrecognized chip type: cannot size the DFIFO */
		goto fail;
	}

	/*
	 * Write zeros through every DFIFO entry (and read each back) so
	 * every SRAM line carries consistent ECC/parity before traffic
	 * flows; the data read back is discarded.
	 */
	for (i = 0; i < dfifo_entries; i++) {
		if ((rs = npi_ipp_write_dfifo(handle,
				portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
			goto fail;
		if ((rs = npi_ipp_read_dfifo(handle, portn,
				i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
			goto fail;
	}

	/*
	 * Clear PFIFO DFIFO status bits.  The status register is read
	 * twice deliberately — presumably both reads are needed to clear
	 * all latched bits; confirm against the IPP hardware documentation.
	 */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Soft reset to make sure we bring the FIFO pointers back to the
	 * original initial position.
	 */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Clean up ECC counter — reading these counter registers clears
	 * them; the values read are discarded.
	 */
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);

	/* One more status read to clear anything latched by the reset */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/* Configure IPP port: enable all IPP interrupt sources */
	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
			!= NPI_SUCCESS)
		goto fail;
	nxgep->ipp.iconfig = ICFG_IPP_ALL;

	/* Enable IPP with ECC correction, bad-CRC drop and TCP/UDP cksum */
	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
		CFG_IPP_TCP_UDP_CKSUM;
	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.config = config;

	/* Set max packet size */
	pkt_size = IPP_MAX_PKT_SIZE;
	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.max_pkt_size = pkt_size;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"nxge_ipp_init: Fail to initialize IPP Port #%d\n",
			portn));
	return (NXGE_ERROR | rs);
}
123 
/* ARGSUSED */
/*
 * nxge_ipp_disable
 *
 *	Quiesce and disable the IPP for one port: stop the RxMAC so no new
 *	data arrives, poll until the DFIFO read pointer catches up with the
 *	write pointer (FIFO drained), then disable and soft-reset the IPP.
 *
 * Arguments:
 *	nxgep	per-device soft state
 *
 * Returns:
 *	NXGE_OK on success, or NXGE_ERROR ORed with the failing NPI status.
 */
nxge_status_t
nxge_ipp_disable(p_nxge_t nxgep)
{
	uint8_t portn;
	uint32_t config;
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	uint16_t wr_ptr, rd_ptr;
	uint32_t try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
	/* Stop inbound traffic first so the FIFO can drain */
	(void) nxge_rx_mac_disable(nxgep);

	/*
	 * Wait until ip read and write fifo pointers are equal
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		/*
		 * NOTE(review): the rd_ptr == 0 / wr_ptr == 1 combination
		 * is tolerated here as benign — presumably the post-reset
		 * pointer position; confirm against the IPP hardware
		 * documentation.  Any other mismatch is a stall.
		 */
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				" nxge_ipp_disable: port%d failed"
				" rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}
	/* disable the IPP */
	config = nxgep->ipp.config;
	if ((rs = npi_ipp_config(handle, DISABLE,
			portn, config)) != NPI_SUCCESS)
		goto fail;

	/* IPP soft reset */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		"nxge_ipp_disable: Fail to disable IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}
179 
180 /* ARGSUSED */
181 nxge_status_t
182 nxge_ipp_reset(p_nxge_t nxgep)
183 {
184 	uint8_t portn;
185 	uint32_t config;
186 	npi_handle_t handle;
187 	npi_status_t rs = NPI_SUCCESS;
188 	uint16_t wr_ptr, rd_ptr;
189 	uint32_t try_count;
190 
191 	handle = nxgep->npi_handle;
192 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
193 
194 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));
195 
196 	/* disable the IPP */
197 	config = nxgep->ipp.config;
198 	if ((rs = npi_ipp_config(handle, DISABLE,
199 			portn, config)) != NPI_SUCCESS)
200 		goto fail;
201 
202 	/*
203 	 * Wait until ip read and write fifo pointers are equal
204 	 */
205 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
206 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
207 	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;
208 
209 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
210 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
211 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
212 		try_count--;
213 	}
214 
215 	if (try_count == 0) {
216 		if ((rd_ptr != 0) && (wr_ptr != 1)) {
217 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
218 				" nxge_ipp_disable: port%d failed"
219 				" rd_fifo != wr_fifo", portn));
220 			goto fail;
221 		}
222 	}
223 
224 	/* IPP soft reset */
225 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
226 		goto fail;
227 	}
228 
229 	/* to reset control FIFO */
230 	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
231 		goto fail;
232 
233 	/*
234 	 * Making sure that error source is cleared if this is an injected
235 	 * error.
236 	 */
237 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
238 
239 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
240 	return (NXGE_OK);
241 fail:
242 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
243 			"nxge_ipp_init: Fail to Reset IPP Port #%d\n",
244 			portn));
245 	return (NXGE_ERROR | rs);
246 }
247 
248 /* ARGSUSED */
249 nxge_status_t
250 nxge_ipp_enable(p_nxge_t nxgep)
251 {
252 	uint8_t portn;
253 	uint32_t config;
254 	npi_handle_t handle;
255 	uint32_t pkt_size;
256 	npi_status_t rs = NPI_SUCCESS;
257 
258 	handle = nxgep->npi_handle;
259 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
260 
261 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));
262 
263 	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
264 		CFG_IPP_TCP_UDP_CKSUM;
265 	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
266 		goto fail;
267 	nxgep->ipp.config = config;
268 
269 	/* Set max packet size */
270 	pkt_size = IPP_MAX_PKT_SIZE;
271 	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
272 			IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
273 		goto fail;
274 	nxgep->ipp.max_pkt_size = pkt_size;
275 
276 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
277 	return (NXGE_OK);
278 fail:
279 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
280 		"nxge_ipp_init: Fail to Enable IPP Port #%d\n", portn));
281 	return (NXGE_ERROR | rs);
282 }
283 
284 /* ARGSUSED */
285 nxge_status_t
286 nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
287 {
288 	npi_handle_t handle;
289 	npi_status_t rs = NPI_SUCCESS;
290 	p_nxge_ipp_stats_t statsp;
291 	ipp_status_t istatus;
292 	uint8_t portn;
293 	p_ipp_errlog_t errlogp;
294 	boolean_t rxport_fatal = B_FALSE;
295 	nxge_status_t status = NXGE_OK;
296 	uint8_t cnt8;
297 	uint16_t cnt16;
298 
299 	handle = nxgep->npi_handle;
300 	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
301 	portn = nxgep->mac.portnum;
302 
303 	errlogp = (p_ipp_errlog_t)&statsp->errlog;
304 
305 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
306 		return (NXGE_ERROR | rs);
307 
308 	if (istatus.value == 0) {
309 		/*
310 		 * The error is not initiated from this port, so just exit.
311 		 */
312 		return (NXGE_OK);
313 	}
314 
315 	if (istatus.bits.w0.dfifo_missed_sop) {
316 		statsp->sop_miss++;
317 		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
318 					&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
319 			return (NXGE_ERROR | rs);
320 		if ((rs = npi_ipp_get_state_mach(handle, portn,
321 				&errlogp->state_mach)) != NPI_SUCCESS)
322 			return (NXGE_ERROR | rs);
323 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
324 			NXGE_FM_EREPORT_IPP_SOP_MISS);
325 		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
326 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
327 				"nxge_ipp_err_evnts: fatal error: sop_miss\n"));
328 		rxport_fatal = B_TRUE;
329 	}
330 	if (istatus.bits.w0.dfifo_missed_eop) {
331 		statsp->eop_miss++;
332 		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
333 				&errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
334 			return (NXGE_ERROR | rs);
335 		if ((rs = npi_ipp_get_state_mach(handle, portn,
336 				&errlogp->state_mach)) != NPI_SUCCESS)
337 			return (NXGE_ERROR | rs);
338 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
339 			NXGE_FM_EREPORT_IPP_EOP_MISS);
340 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
341 			"nxge_ipp_err_evnts: fatal error: eop_miss\n"));
342 		rxport_fatal = B_TRUE;
343 	}
344 	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
345 		boolean_t ue_ecc_valid;
346 
347 		if ((status = nxge_ipp_eccue_valid_check(nxgep,
348 				&ue_ecc_valid)) != NXGE_OK)
349 			return (status);
350 
351 		if (ue_ecc_valid) {
352 			statsp->dfifo_ue++;
353 			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
354 					&errlogp->ecc_syndrome)) != NPI_SUCCESS)
355 				return (NXGE_ERROR | rs);
356 			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
357 				NXGE_FM_EREPORT_IPP_DFIFO_UE);
358 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
359 				"nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
360 			rxport_fatal = B_TRUE;
361 		}
362 	}
363 	if (istatus.bits.w0.pre_fifo_perr) {
364 		statsp->pfifo_perr++;
365 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
366 			NXGE_FM_EREPORT_IPP_PFIFO_PERR);
367 		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
368 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
369 				"nxge_ipp_err_evnts: "
370 				"fatal error: pre_pifo_perr\n"));
371 		rxport_fatal = B_TRUE;
372 	}
373 	if (istatus.bits.w0.pre_fifo_overrun) {
374 		statsp->pfifo_over++;
375 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
376 			NXGE_FM_EREPORT_IPP_PFIFO_OVER);
377 		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
378 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
379 				"nxge_ipp_err_evnts: "
380 				"fatal error: pfifo_over\n"));
381 		rxport_fatal = B_TRUE;
382 	}
383 	if (istatus.bits.w0.pre_fifo_underrun) {
384 		statsp->pfifo_und++;
385 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
386 			NXGE_FM_EREPORT_IPP_PFIFO_UND);
387 		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
388 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
389 				"nxge_ipp_err_evnts: "
390 				"fatal error: pfifo_und\n"));
391 		rxport_fatal = B_TRUE;
392 	}
393 	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
394 		/*
395 		 * Clear bit BAD_CS_MX of register IPP_INT_STAT
396 		 * by reading register IPP_BAD_CS_CNT
397 		 */
398 		(void) npi_ipp_get_cs_err_count(handle, portn, &cnt16);
399 		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
400 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
401 			NXGE_FM_EREPORT_IPP_BAD_CS_MX);
402 		if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW *
403 				IPP_BAD_CS_CNT_MASK))
404 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
405 				"nxge_ipp_err_evnts: bad_cs_max\n"));
406 	}
407 	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
408 		/*
409 		 * Clear bit PKT_DIS_MX of register IPP_INT_STAT
410 		 * by reading register IPP_PKT_DIS
411 		 */
412 		(void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16);
413 		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
414 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
415 			NXGE_FM_EREPORT_IPP_PKT_DIS_MX);
416 		if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW *
417 				IPP_PKT_DIS_CNT_MASK))
418 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
419 				"nxge_ipp_err_evnts: pkt_dis_max\n"));
420 	}
421 	if (istatus.bits.w0.ecc_err_cnt_ovfl) {
422 		/*
423 		 * Clear bit ECC_ERR_MAX of register IPP_INI_STAT
424 		 * by reading register IPP_ECC
425 		 */
426 		(void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8);
427 		statsp->ecc_err_cnt += IPP_ECC_CNT_MASK;
428 		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
429 			NXGE_FM_EREPORT_IPP_ECC_ERR_MAX);
430 		if (statsp->ecc_err_cnt < (IPP_MAX_ERR_SHOW *
431 				IPP_ECC_CNT_MASK))
432 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
433 				"nxge_ipp_err_evnts: pkt_ecc_err_max\n"));
434 	}
435 	/*
436 	 * Making sure that error source is cleared if this is an injected
437 	 * error.
438 	 */
439 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
440 
441 	if (rxport_fatal) {
442 		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
443 			" nxge_ipp_handle_sys_errors:"
444 			" fatal Error on  Port #%d\n", portn));
445 		status = nxge_ipp_fatal_err_recover(nxgep);
446 		if (status == NXGE_OK) {
447 			FM_SERVICE_RESTORED(nxgep);
448 		}
449 	}
450 	return (status);
451 }
452 
453 /* ARGSUSED */
454 void
455 nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
456 {
457 	ipp_status_t ipps;
458 	ipp_ecc_ctrl_t ecc_ctrl;
459 	uint8_t portn = nxgep->mac.portnum;
460 
461 	switch (err_id) {
462 	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
463 		ecc_ctrl.value = 0;
464 		ecc_ctrl.bits.w0.cor_dbl = 1;
465 		ecc_ctrl.bits.w0.cor_1 = 1;
466 		ecc_ctrl.bits.w0.cor_lst = 1;
467 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
468 			(unsigned long long) ecc_ctrl.value);
469 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
470 			ecc_ctrl.value);
471 		break;
472 
473 	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
474 		ecc_ctrl.value = 0;
475 		ecc_ctrl.bits.w0.cor_sng = 1;
476 		ecc_ctrl.bits.w0.cor_1 = 1;
477 		ecc_ctrl.bits.w0.cor_snd = 1;
478 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
479 			(unsigned long long) ecc_ctrl.value);
480 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
481 			ecc_ctrl.value);
482 		break;
483 
484 	case NXGE_FM_EREPORT_IPP_EOP_MISS:
485 	case NXGE_FM_EREPORT_IPP_SOP_MISS:
486 	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
487 	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
488 	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
489 	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
490 	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
491 	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
492 	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
493 		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
494 			&ipps.value);
495 		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
496 			ipps.bits.w0.dfifo_missed_eop = 1;
497 		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
498 			ipps.bits.w0.dfifo_missed_sop = 1;
499 		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
500 			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
501 		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
502 			ipps.bits.w0.dfifo_corr_ecc_err = 1;
503 		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
504 			ipps.bits.w0.pre_fifo_perr = 1;
505 		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX) {
506 			/*
507 			 * Fill register IPP_ECC with max ECC-error-
508 			 * counter value (0xff) to set the ECC_ERR_MAX bit
509 			 * of the IPP_INT_STAT register and trigger an
510 			 * FMA ereport.
511 			 */
512 			IPP_REG_WR(nxgep->npi_handle, portn,
513 			    IPP_ECC_ERR_COUNTER_REG, IPP_ECC_CNT_MASK);
514 		} else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
515 			ipps.bits.w0.pre_fifo_overrun = 1;
516 		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
517 			ipps.bits.w0.pre_fifo_underrun = 1;
518 		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX) {
519 			/*
520 			 * Fill IPP_BAD_CS_CNT with max bad-checksum-counter
521 			 * value (0x3fff) to set the BAD_CS_MX bit of
522 			 * IPP_INT_STAT and trigger an FMA ereport.
523 			 */
524 			IPP_REG_WR(nxgep->npi_handle, portn,
525 			    IPP_TCP_CKSUM_ERR_CNT_REG, IPP_BAD_CS_CNT_MASK);
526 		} else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX) {
527 			/*
528 			 * Fill IPP_PKT_DIS with max packet-discard-counter
529 			 * value (0x3fff) to set the PKT_DIS_MX bit of
530 			 * IPP_INT_STAT and trigger an FMA ereport.
531 			 */
532 			IPP_REG_WR(nxgep->npi_handle, portn,
533 			    IPP_DISCARD_PKT_CNT_REG, IPP_PKT_DIS_CNT_MASK);
534 		}
535 		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
536 			(unsigned long long) ipps.value);
537 		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
538 			ipps.value);
539 		break;
540 	}
541 }
542 
543 /* ARGSUSED */
544 nxge_status_t
545 nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
546 {
547 	npi_handle_t handle;
548 	npi_status_t rs = NPI_SUCCESS;
549 	nxge_status_t status = NXGE_OK;
550 	uint8_t portn;
551 	uint16_t wr_ptr;
552 	uint16_t rd_ptr;
553 	uint32_t try_count;
554 	uint32_t dfifo_entries;
555 	ipp_status_t istatus;
556 	uint32_t d0, d1, d2, d3, d4;
557 	int i;
558 
559 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));
560 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
561 		"Recovering from RxPort error..."));
562 
563 	handle = nxgep->npi_handle;
564 	portn = nxgep->mac.portnum;
565 
566 	/*
567 	 * Making sure that error source is cleared if this is an injected
568 	 * error.
569 	 */
570 	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);
571 
572 	/* Disable RxMAC */
573 	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
574 		goto fail;
575 
576 	/* When recovering from IPP, RxDMA channel resets are not necessary */
577 	/* Reset ZCP CFIFO */
578 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
579 	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
580 		goto fail;
581 
582 	/*
583 	 * Wait until ip read and write fifo pointers are equal
584 	 */
585 	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
586 	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
587 	try_count = 512;
588 
589 	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
590 		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
591 		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
592 		try_count--;
593 	}
594 
595 	if (try_count == 0) {
596 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
597 			" nxge_ipp_reset: port%d IPP stalled..."
598 			" rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
599 			portn, rd_ptr, wr_ptr));
600 		/*
601 		 * This means the fatal error occurred on the first line of the
602 		 * fifo. In this case, just reset the IPP without draining the
603 		 * PFIFO.
604 		 */
605 	}
606 
607 	if (nxgep->niu_type == N2_NIU) {
608 		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
609 	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
610 		if (portn < 2)
611 			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
612 		else
613 			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
614 	} else {
615 		goto fail;
616 	}
617 
618 	/* Clean up DFIFO SRAM entries */
619 	for (i = 0; i < dfifo_entries; i++) {
620 		if ((rs = npi_ipp_write_dfifo(handle, portn,
621 				i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
622 			goto fail;
623 		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
624 				&d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
625 			goto fail;
626 	}
627 
628 	/* Clear PFIFO DFIFO status bits */
629 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
630 		goto fail;
631 	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
632 		goto fail;
633 
634 	/* Reset IPP */
635 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
636 	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
637 		goto fail;
638 
639 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
640 	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
641 		goto fail;
642 
643 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
644 	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
645 		goto fail;
646 
647 	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
648 	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
649 		goto fail;
650 
651 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
652 		"Recovery successful, RxPort restored"));
653 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
654 
655 	return (NXGE_OK);
656 fail:
657 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
658 	return (status | rs);
659 }
660 
661 /* ARGSUSED */
662 /*
663  *    A hardware bug may cause fake ECCUEs (ECC Uncorrectable Error).
664  * This function checks if a ECCUE is real(valid) or not.  It is not
665  * real if rd_ptr == wr_ptr.
666  *    The hardware module that has the bug is used not only by the IPP
667  * FIFO but also by the ZCP FIFO, therefore this function is also
668  * called by nxge_zcp_handle_sys_errors for validating the ZCP FIFO
669  * error.
670  */
671 nxge_status_t
672 nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
673 {
674 	npi_handle_t handle;
675 	npi_status_t rs = NPI_SUCCESS;
676 	uint8_t portn;
677 	uint16_t rd_ptr;
678 	uint16_t wr_ptr;
679 	uint16_t curr_rd_ptr;
680 	uint16_t curr_wr_ptr;
681 	uint32_t stall_cnt;
682 	uint32_t d0, d1, d2, d3, d4;
683 
684 	handle = nxgep->npi_handle;
685 	portn = nxgep->mac.portnum;
686 	*valid = B_TRUE;
687 
688 	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
689 		!= NPI_SUCCESS)
690 		goto fail;
691 	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
692 		!= NPI_SUCCESS)
693 		goto fail;
694 
695 	if (rd_ptr == wr_ptr) {
696 		*valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */
697 	} else {
698 		stall_cnt = 0;
699 		/*
700 		 * Check if the two pointers are moving, the ECCUE is invali
701 		 * if either pointer is moving, which indicates that the FIFO
702 		 * is functional.
703 		 */
704 		while (stall_cnt < 16) {
705 			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
706 					portn, &curr_rd_ptr)) != NPI_SUCCESS)
707 				goto fail;
708 			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
709 					portn, &curr_wr_ptr)) != NPI_SUCCESS)
710 				goto fail;
711 
712 			if (rd_ptr == curr_rd_ptr && wr_ptr == curr_wr_ptr) {
713 				stall_cnt++;
714 			} else {
715 				*valid = B_FALSE;
716 				break;
717 			}
718 		}
719 
720 		if (valid) {
721 			/*
722 			 * Further check to see if the ECCUE is valid. The
723 			 * error is real if the LSB of d4 is 1, which
724 			 * indicates that the data that has set the ECC
725 			 * error flag is the 16-byte internal control word.
726 			 */
727 			if ((rs = npi_ipp_read_dfifo(handle, portn, rd_ptr,
728 			    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
729 				goto fail;
730 			if ((d4 & 0x1) == 0)	/* Not the 1st line */
731 				*valid = B_FALSE;
732 		}
733 	}
734 	return (NXGE_OK);
735 fail:
736 	return (NXGE_ERROR | rs);
737 }
738