/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <nxge_impl.h>
#include <nxge_ipp.h>

#define NXGE_IPP_FIFO_SYNC_TRY_COUNT 100

/* ARGSUSED */
nxge_status_t
nxge_ipp_init(p_nxge_t nxgep)
{
        uint8_t portn;
        uint32_t config;
        npi_handle_t handle;
        uint32_t pkt_size;
        ipp_status_t istatus;
        npi_status_t rs = NPI_SUCCESS;
        uint64_t val;
        uint32_t d0, d1, d2, d3, d4;
        int i;
        uint32_t dfifo_entries;

        handle = nxgep->npi_handle;
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));

        /* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
        if (nxgep->niu_type == N2_NIU) {
                dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
        } else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
                if (portn < 2)
                        dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
                else
                        dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
        } else {
                goto fail;
        }

        for (i = 0; i < dfifo_entries; i++) {
                if ((rs = npi_ipp_write_dfifo(handle,
                    portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
                        goto fail;
                if ((rs = npi_ipp_read_dfifo(handle, portn,
                    i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
                        goto fail;
        }

        /* Clear PFIFO DFIFO status bits */
        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                goto fail;
        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                goto fail;

        /*
         * Soft reset to make sure we bring the FIFO pointers back to the
         * original initial position.
         */
        if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
                goto fail;

        /* Clean up ECC counter */
        IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
        IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val);
        IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);

        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                goto fail;

        /* Configure IPP port */
        if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
            != NPI_SUCCESS)
                goto fail;
        nxgep->ipp.iconfig = ICFG_IPP_ALL;

        config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
            CFG_IPP_TCP_UDP_CKSUM;
        if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
                goto fail;
        nxgep->ipp.config = config;

        /* Set max packet size */
        pkt_size = IPP_MAX_PKT_SIZE;
        if ((rs = npi_ipp_set_max_pktsize(handle, portn,
            IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
                goto fail;
        nxgep->ipp.max_pkt_size = pkt_size;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));

        return (NXGE_OK);
fail:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_ipp_init: Failed to initialize IPP Port #%d\n",
            portn));
        return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_disable(p_nxge_t nxgep)
{
        uint8_t portn;
        uint32_t config;
        npi_handle_t handle;
        npi_status_t rs = NPI_SUCCESS;
        uint16_t wr_ptr, rd_ptr;
        uint32_t try_count;

        handle = nxgep->npi_handle;
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
        (void) nxge_rx_mac_disable(nxgep);

        /*
         * Wait until the IPP read and write FIFO pointers are equal.
         */
        (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
        (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
        try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

        while ((try_count > 0) && (rd_ptr != wr_ptr)) {
                (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
                (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
                try_count--;
        }

        if (try_count == 0) {
                if ((rd_ptr != 0) && (wr_ptr != 1)) {
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            " nxge_ipp_disable: port%d failed"
                            " rd_fifo != wr_fifo", portn));
                        goto fail;
                }
        }
        /* disable the IPP */
        config = nxgep->ipp.config;
        if ((rs = npi_ipp_config(handle, DISABLE,
            portn, config)) != NPI_SUCCESS)
                goto fail;

        /* IPP soft reset */
        if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
                goto fail;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
        return (NXGE_OK);
fail:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_ipp_disable: Failed to disable IPP Port #%d\n", portn));
        return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_reset(p_nxge_t nxgep)
{
        uint8_t portn;
        uint32_t config;
        npi_handle_t handle;
        npi_status_t rs = NPI_SUCCESS;
        uint16_t wr_ptr, rd_ptr;
        uint32_t try_count;

        handle = nxgep->npi_handle;
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));

        /* disable the IPP */
        config = nxgep->ipp.config;
        if ((rs = npi_ipp_config(handle, DISABLE,
            portn, config)) != NPI_SUCCESS)
                goto fail;

        /*
         * Wait until the IPP read and write FIFO pointers are equal.
         */
        (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
        (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
        try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

        while ((try_count > 0) && (rd_ptr != wr_ptr)) {
                (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
                (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
                try_count--;
        }

        if (try_count == 0) {
                if ((rd_ptr != 0) && (wr_ptr != 1)) {
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            " nxge_ipp_reset: port%d failed"
                            " rd_fifo != wr_fifo", portn));
                        goto fail;
                }
        }

        /* IPP soft reset */
        if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
                goto fail;
        }

        /* Reset the control FIFO */
        if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
                goto fail;

        /*
         * Making sure that error source is cleared if this is an injected
         * error.
         */
        IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
        return (NXGE_OK);
fail:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_ipp_reset: Failed to reset IPP Port #%d\n",
            portn));
        return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_enable(p_nxge_t nxgep)
{
        uint8_t portn;
        uint32_t config;
        npi_handle_t handle;
        uint32_t pkt_size;
        npi_status_t rs = NPI_SUCCESS;

        handle = nxgep->npi_handle;
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));

        config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
            CFG_IPP_TCP_UDP_CKSUM;
        if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
                goto fail;
        nxgep->ipp.config = config;

        /* Set max packet size */
        pkt_size = IPP_MAX_PKT_SIZE;
        if ((rs = npi_ipp_set_max_pktsize(handle, portn,
            IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
                goto fail;
        nxgep->ipp.max_pkt_size = pkt_size;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
        return (NXGE_OK);
fail:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_ipp_enable: Failed to enable IPP Port #%d\n", portn));
        return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
{
        npi_handle_t handle;
        npi_status_t rs = NPI_SUCCESS;
        p_nxge_ipp_stats_t statsp;
        ipp_status_t istatus;
        uint8_t portn;
        p_ipp_errlog_t errlogp;
        boolean_t rxport_fatal = B_FALSE;
        nxge_status_t status = NXGE_OK;

        handle = nxgep->npi_handle;
        statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
        portn = nxgep->mac.portnum;

        errlogp = (p_ipp_errlog_t)&statsp->errlog;

        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                return (NXGE_ERROR | rs);

        if (istatus.value == 0) {
                /*
                 * The error is not initiated from this port, so just exit.
                 */
                return (NXGE_OK);
        }

        if (istatus.bits.w0.dfifo_missed_sop) {
                statsp->sop_miss++;
                if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
                    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
                        return (NXGE_ERROR | rs);
                if ((rs = npi_ipp_get_state_mach(handle, portn,
                    &errlogp->state_mach)) != NPI_SUCCESS)
                        return (NXGE_ERROR | rs);
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_SOP_MISS);
                if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: fatal error: sop_miss\n"));
                rxport_fatal = B_TRUE;
        }
        if (istatus.bits.w0.dfifo_missed_eop) {
                statsp->eop_miss++;
                if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
                    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
                        return (NXGE_ERROR | rs);
                if ((rs = npi_ipp_get_state_mach(handle, portn,
                    &errlogp->state_mach)) != NPI_SUCCESS)
                        return (NXGE_ERROR | rs);
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_EOP_MISS);
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "nxge_ipp_err_evnts: fatal error: eop_miss\n"));
                rxport_fatal = B_TRUE;
        }
        if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
                boolean_t ue_ecc_valid;

                if ((status = nxge_ipp_eccue_valid_check(nxgep,
                    &ue_ecc_valid)) != NXGE_OK)
                        return (status);

                if (ue_ecc_valid) {
                        statsp->dfifo_ue++;
                        if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
                            &errlogp->ecc_syndrome)) != NPI_SUCCESS)
                                return (NXGE_ERROR | rs);
                        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                            NXGE_FM_EREPORT_IPP_DFIFO_UE);
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
                        rxport_fatal = B_TRUE;
                }
        }
        if (istatus.bits.w0.pre_fifo_perr) {
                statsp->pfifo_perr++;
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_PFIFO_PERR);
                if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: "
                            "fatal error: pre_fifo_perr\n"));
                rxport_fatal = B_TRUE;
        }
        if (istatus.bits.w0.pre_fifo_overrun) {
                statsp->pfifo_over++;
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_PFIFO_OVER);
                if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: "
                            "fatal error: pfifo_over\n"));
                rxport_fatal = B_TRUE;
        }
        if (istatus.bits.w0.pre_fifo_underrun) {
                statsp->pfifo_und++;
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_PFIFO_UND);
                if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: "
                            "fatal error: pfifo_und\n"));
                rxport_fatal = B_TRUE;
        }
        if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
                statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_BAD_CS_MX);
                if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW *
                    IPP_BAD_CS_CNT_MASK))
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: bad_cs_max\n"));
        }
        if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
                statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
                NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                    NXGE_FM_EREPORT_IPP_PKT_DIS_MX);
                if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW *
                    IPP_PKT_DIS_CNT_MASK))
                        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                            "nxge_ipp_err_evnts: pkt_dis_max\n"));
        }

        /*
         * Making sure that error source is cleared if this is an injected
         * error.
         */
        IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

        if (rxport_fatal) {
                NXGE_DEBUG_MSG((nxgep, IPP_CTL,
                    " nxge_ipp_handle_sys_errors:"
                    " fatal Error on Port #%d\n", portn));
                status = nxge_ipp_fatal_err_recover(nxgep);
                if (status == NXGE_OK) {
                        FM_SERVICE_RESTORED(nxgep);
                }
        }
        return (status);
}

/* ARGSUSED */
void
nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
        ipp_status_t ipps;
        ipp_ecc_ctrl_t ecc_ctrl;
        uint8_t portn = nxgep->mac.portnum;

        switch (err_id) {
        case NXGE_FM_EREPORT_IPP_DFIFO_UE:
                ecc_ctrl.value = 0;
                ecc_ctrl.bits.w0.cor_dbl = 1;
                ecc_ctrl.bits.w0.cor_1 = 1;
                ecc_ctrl.bits.w0.cor_lst = 1;
                cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
                    (unsigned long long) ecc_ctrl.value);
                IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
                    ecc_ctrl.value);
                break;

        case NXGE_FM_EREPORT_IPP_DFIFO_CE:
                ecc_ctrl.value = 0;
                ecc_ctrl.bits.w0.cor_sng = 1;
                ecc_ctrl.bits.w0.cor_1 = 1;
                ecc_ctrl.bits.w0.cor_snd = 1;
                cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
                    (unsigned long long) ecc_ctrl.value);
                IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
                    ecc_ctrl.value);
                break;

        case NXGE_FM_EREPORT_IPP_EOP_MISS:
        case NXGE_FM_EREPORT_IPP_SOP_MISS:
        case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
        case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
        case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
        case NXGE_FM_EREPORT_IPP_PFIFO_UND:
        case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
        case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
        case NXGE_FM_EREPORT_IPP_RESET_FAIL:
                IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
                    &ipps.value);
                if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
                        ipps.bits.w0.dfifo_missed_eop = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
                        ipps.bits.w0.dfifo_missed_sop = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
                        ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
                        ipps.bits.w0.dfifo_corr_ecc_err = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
                        ipps.bits.w0.pre_fifo_perr = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX)
                        ipps.bits.w0.ecc_err_cnt_ovfl = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
                        ipps.bits.w0.pre_fifo_overrun = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
                        ipps.bits.w0.pre_fifo_underrun = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX)
                        ipps.bits.w0.bad_cksum_cnt_ovfl = 1;
                else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX)
                        ipps.bits.w0.pkt_discard_cnt_ovfl = 1;
                cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
                    (unsigned long long) ipps.value);
                IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
                    ipps.value);
                break;
        }
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
{
        npi_handle_t handle;
        npi_status_t rs = NPI_SUCCESS;
        nxge_status_t status = NXGE_OK;
        uint8_t portn;
        uint16_t wr_ptr;
        uint16_t rd_ptr;
        uint32_t try_count;
        uint32_t dfifo_entries;
        ipp_status_t istatus;
        uint32_t d0, d1, d2, d3, d4;
        int i;

        NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "Recovering from RxPort error..."));

        handle = nxgep->npi_handle;
        portn = nxgep->mac.portnum;

        /*
         * Making sure that error source is cleared if this is an injected
         * error.
         */
        IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

        /* Disable RxMAC */
        if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
                goto fail;

        /* When recovering from IPP, RxDMA channel resets are not necessary */
        /* Reset ZCP CFIFO */
        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
        if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
                goto fail;

        /*
         * Wait until the IPP read and write FIFO pointers are equal.
         */
        (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
        (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
        try_count = 512;

        while ((try_count > 0) && (rd_ptr != wr_ptr)) {
                (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
                (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
                try_count--;
        }

        if (try_count == 0) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    " nxge_ipp_fatal_err_recover: port%d IPP stalled..."
                    " rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
                    portn, rd_ptr, wr_ptr));
                /*
                 * This means the fatal error occurred on the first line of
                 * the fifo. In this case, just reset the IPP without
                 * draining the PFIFO.
                 */
        }

        if (nxgep->niu_type == N2_NIU) {
                dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
        } else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
                if (portn < 2)
                        dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
                else
                        dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
        } else {
                goto fail;
        }

        /* Clean up DFIFO SRAM entries */
        for (i = 0; i < dfifo_entries; i++) {
                if ((rs = npi_ipp_write_dfifo(handle, portn,
                    i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
                        goto fail;
                if ((rs = npi_ipp_read_dfifo(handle, portn, i,
                    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
                        goto fail;
        }

        /* Clear PFIFO DFIFO status bits */
        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                goto fail;
        if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
                goto fail;

        /* Reset IPP */
        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
        if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
                goto fail;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
        if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
                goto fail;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
        if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
                goto fail;

        NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
        if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
                goto fail;

        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "Recovery Successful, RxPort Restored"));
        NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));

        return (NXGE_OK);
fail:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
        return (status | rs);
}

/* ARGSUSED */
/*
 * A hardware bug may cause a fake ECCUE (ECC uncorrectable error).
 * This function checks whether an ECCUE is real (valid) or not. It is
 * not real if rd_ptr == wr_ptr.
 * The hardware module that has the bug is used not only by the IPP
 * FIFO but also by the ZCP FIFO, therefore this function is also
 * called by nxge_zcp_handle_sys_errors for validating the ZCP FIFO
 * error.
 */
nxge_status_t
nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
{
        npi_handle_t handle;
        npi_status_t rs = NPI_SUCCESS;
        uint8_t portn;
        uint16_t rd_ptr;
        uint16_t wr_ptr;
        uint16_t curr_rd_ptr;
        uint16_t curr_wr_ptr;
        uint32_t stall_cnt;
        uint32_t d0, d1, d2, d3, d4;

        handle = nxgep->npi_handle;
        portn = nxgep->mac.portnum;
        *valid = B_TRUE;

        if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
            != NPI_SUCCESS)
                goto fail;
        if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
            != NPI_SUCCESS)
                goto fail;

        if (rd_ptr == wr_ptr) {
                *valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */
        } else {
                stall_cnt = 0;
                while (stall_cnt < 16) {
                        if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
                            portn, &curr_rd_ptr)) != NPI_SUCCESS)
                                goto fail;
                        if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
                            portn, &curr_wr_ptr)) != NPI_SUCCESS)
                                goto fail;

                        if ((rd_ptr == curr_rd_ptr) && (wr_ptr == curr_wr_ptr))
                                stall_cnt++;
                        else {
                                *valid = B_FALSE;
                                break;
                        }
                }

                if (*valid) {
                        /* further check to see if the ECC UE is valid */
                        if ((rs = npi_ipp_read_dfifo(handle, portn,
                            rd_ptr, &d0, &d1, &d2, &d3,
                            &d4)) != NPI_SUCCESS)
                                goto fail;
                        if ((d4 & 0x1) == 0) /* Not the 1st line */
                                *valid = B_FALSE;
                }
        }
        return (NXGE_OK);
fail:
        return (NXGE_ERROR | rs);
}