/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <nxge_impl.h>
#include <nxge_ipp.h>

#define	NXGE_IPP_FIFO_SYNC_TRY_COUNT	100

/* ARGSUSED */
nxge_status_t
nxge_ipp_init(p_nxge_t nxgep)
{
	uint8_t		portn;
	uint32_t	config;
	npi_handle_t	handle;
	uint32_t	pkt_size;
	ipp_status_t	istatus;
	npi_status_t	rs = NPI_SUCCESS;
	uint64_t	val;
	uint32_t	d0, d1, d2, d3, d4;
	int		i;
	uint32_t	dfifo_entries;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn));

	/* Initialize ECC and parity in SRAM of DFIFO and PFIFO */
	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
		if (portn < 2)
			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
		else
			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
	} else if (nxgep->niu_type == N2_NIU) {
		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
	} else
		goto fail;

	for (i = 0; i < dfifo_entries; i++) {
		if ((rs = npi_ipp_write_dfifo(handle,
		    portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
			goto fail;
		if ((rs = npi_ipp_read_dfifo(handle, portn,
		    i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
			goto fail;
	}

	/* Clear PFIFO/DFIFO status bits */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Soft reset to make sure we bring the FIFO pointers back to the
	 * original initial position.
	 */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/* Clean up the ECC and error counters */
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val);
	IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val);

	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/* Configure IPP port */
	if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL))
	    != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.iconfig = ICFG_IPP_ALL;

	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
	    CFG_IPP_TCP_UDP_CKSUM;
	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.config = config;

	/* Set max packet size */
	pkt_size = IPP_MAX_PKT_SIZE;
	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.max_pkt_size = pkt_size;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_init: Failed to initialize IPP Port #%d\n",
	    portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_disable(p_nxge_t nxgep)
{
	uint8_t		portn;
	uint32_t	config;
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	uint16_t	wr_ptr, rd_ptr;
	uint32_t	try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn));
	(void) nxge_rx_mac_disable(nxgep);

	/*
	 * Wait until the IPP read and write FIFO pointers are equal
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_ipp_disable: port%d failed"
			    " rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}
	/* Disable the IPP */
	config = nxgep->ipp.config;
	if ((rs = npi_ipp_config(handle, DISABLE,
	    portn, config)) != NPI_SUCCESS)
		goto fail;

	/* IPP soft reset */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_disable: Failed to disable IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}
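
/*
 * nxge_ipp_reset
 *
 * Soft-reset the IPP block for this port: disable it, wait (bounded by
 * NXGE_IPP_FIFO_SYNC_TRY_COUNT) for the DFIFO read and write pointers
 * to line up, issue the IPP soft reset and the ZCP control FIFO reset,
 * and clear IPP_ECC_CTRL_REG in case an error had been injected.
 */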
/* ARGSUSED */
nxge_status_t
nxge_ipp_reset(p_nxge_t nxgep)
{
	uint8_t		portn;
	uint32_t	config;
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	uint16_t	wr_ptr, rd_ptr;
	uint32_t	try_count;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn));

	/* Disable the IPP */
	config = nxgep->ipp.config;
	if ((rs = npi_ipp_config(handle, DISABLE,
	    portn, config)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Wait until the IPP read and write FIFO pointers are equal
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		if ((rd_ptr != 0) && (wr_ptr != 1)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_ipp_reset: port%d failed"
			    " rd_fifo != wr_fifo", portn));
			goto fail;
		}
	}

	/* IPP soft reset */
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) {
		goto fail;
	}

	/* Reset the ZCP control FIFO */
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Make sure that the error source is cleared if this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_reset: Failed to reset IPP Port #%d\n",
	    portn));
	return (NXGE_ERROR | rs);
}

/* ARGSUSED */
nxge_status_t
nxge_ipp_enable(p_nxge_t nxgep)
{
	uint8_t		portn;
	uint32_t	config;
	npi_handle_t	handle;
	uint32_t	pkt_size;
	npi_status_t	rs = NPI_SUCCESS;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn));

	config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC |
	    CFG_IPP_TCP_UDP_CKSUM;
	if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.config = config;

	/* Set max packet size */
	pkt_size = IPP_MAX_PKT_SIZE;
	if ((rs = npi_ipp_set_max_pktsize(handle, portn,
	    IPP_MAX_PKT_SIZE)) != NPI_SUCCESS)
		goto fail;
	nxgep->ipp.max_pkt_size = pkt_size;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn));
	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_ipp_enable: Failed to enable IPP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}
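
/*
 * nxge_ipp_handle_sys_errors
 *
 * Handle IPP system errors: read the IPP status register, bump the
 * corresponding kstat counters, capture the error log state (DFIFO
 * read pointer, state machine, ECC syndrome) where applicable, and
 * post an FMA ereport for each error seen.  If any of the errors is
 * fatal to the RX port, run nxge_ipp_fatal_err_recover() and restore
 * FMA service on success.
 */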
/* ARGSUSED */
nxge_status_t
nxge_ipp_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	p_nxge_ipp_stats_t	statsp;
	ipp_status_t		istatus;
	uint8_t			portn;
	p_ipp_errlog_t		errlogp;
	boolean_t		rxport_fatal = B_FALSE;
	nxge_status_t		status = NXGE_OK;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats;
	portn = nxgep->mac.portnum;

	errlogp = (p_ipp_errlog_t)&statsp->errlog;

	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (istatus.value == 0) {
		/*
		 * The error was not initiated from this port, so just exit.
		 */
		return (NXGE_OK);
	}

	if (istatus.bits.w0.dfifo_missed_sop) {
		statsp->sop_miss++;
		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		if ((rs = npi_ipp_get_state_mach(handle, portn,
		    &errlogp->state_mach)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_SOP_MISS);
		if (statsp->sop_miss < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: fatal error: sop_miss\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.dfifo_missed_eop) {
		statsp->eop_miss++;
		if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn,
		    &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		if ((rs = npi_ipp_get_state_mach(handle, portn,
		    &errlogp->state_mach)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_EOP_MISS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_ipp_err_evnts: fatal error: eop_miss\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.dfifo_uncorr_ecc_err) {
		boolean_t ue_ecc_valid;

		if ((status = nxge_ipp_eccue_valid_check(nxgep,
		    &ue_ecc_valid)) != NXGE_OK)
			return (status);

		if (ue_ecc_valid) {
			statsp->dfifo_ue++;
			if ((rs = npi_ipp_get_ecc_syndrome(handle, portn,
			    &errlogp->ecc_syndrome)) != NPI_SUCCESS)
				return (NXGE_ERROR | rs);
			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			    NXGE_FM_EREPORT_IPP_DFIFO_UE);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: fatal error: dfifo_ue\n"));
			rxport_fatal = B_TRUE;
		}
	}
	if (istatus.bits.w0.pre_fifo_perr) {
		statsp->pfifo_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_PFIFO_PERR);
		if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pre_fifo_perr\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.pre_fifo_overrun) {
		statsp->pfifo_over++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_PFIFO_OVER);
		if (statsp->pfifo_over < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pfifo_over\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.pre_fifo_underrun) {
		statsp->pfifo_und++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_PFIFO_UND);
		if (statsp->pfifo_und < IPP_MAX_ERR_SHOW)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: "
			    "fatal error: pfifo_und\n"));
		rxport_fatal = B_TRUE;
	}
	if (istatus.bits.w0.bad_cksum_cnt_ovfl) {
		statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_BAD_CS_MX);
		if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW *
		    IPP_BAD_CS_CNT_MASK))
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: bad_cs_max\n"));
	}
	if (istatus.bits.w0.pkt_discard_cnt_ovfl) {
		statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_IPP_PKT_DIS_MX);
		if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW *
		    IPP_PKT_DIS_CNT_MASK))
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_ipp_err_evnts: pkt_dis_max\n"));
	}

	/*
	 * Make sure that the error source is cleared if this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	if (rxport_fatal) {
		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
		    " nxge_ipp_handle_sys_errors:"
		    " fatal error on Port #%d\n", portn));
		status = nxge_ipp_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
	return (status);
}
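
/*
 * nxge_ipp_inject_err
 *
 * Inject the IPP error identified by err_id.  For the DFIFO ECC error
 * IDs this programs IPP_ECC_CTRL_REG so the hardware generates the
 * error; for the remaining error IDs it sets the matching bit directly
 * in IPP_INT_STATUS_REG.
 */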
/* ARGSUSED */
void
nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
	ipp_status_t	ipps;
	ipp_ecc_ctrl_t	ecc_ctrl;
	uint8_t		portn = nxgep->mac.portnum;

	switch (err_id) {
	case NXGE_FM_EREPORT_IPP_DFIFO_UE:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_dbl = 1;
		ecc_ctrl.bits.w0.cor_1 = 1;
		ecc_ctrl.bits.w0.cor_lst = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
		    (unsigned long long) ecc_ctrl.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
		    ecc_ctrl.value);
		break;

	case NXGE_FM_EREPORT_IPP_DFIFO_CE:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_sng = 1;
		ecc_ctrl.bits.w0.cor_1 = 1;
		ecc_ctrl.bits.w0.cor_snd = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n",
		    (unsigned long long) ecc_ctrl.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG,
		    ecc_ctrl.value);
		break;

	case NXGE_FM_EREPORT_IPP_EOP_MISS:
	case NXGE_FM_EREPORT_IPP_SOP_MISS:
	case NXGE_FM_EREPORT_IPP_PFIFO_PERR:
	case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX:
	case NXGE_FM_EREPORT_IPP_PFIFO_OVER:
	case NXGE_FM_EREPORT_IPP_PFIFO_UND:
	case NXGE_FM_EREPORT_IPP_BAD_CS_MX:
	case NXGE_FM_EREPORT_IPP_PKT_DIS_MX:
	case NXGE_FM_EREPORT_IPP_RESET_FAIL:
		IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
		    &ipps.value);
		if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS)
			ipps.bits.w0.dfifo_missed_eop = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS)
			ipps.bits.w0.dfifo_missed_sop = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE)
			ipps.bits.w0.dfifo_uncorr_ecc_err = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE)
			ipps.bits.w0.dfifo_corr_ecc_err = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR)
			ipps.bits.w0.pre_fifo_perr = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX)
			ipps.bits.w0.ecc_err_cnt_ovfl = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER)
			ipps.bits.w0.pre_fifo_overrun = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND)
			ipps.bits.w0.pre_fifo_underrun = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX)
			ipps.bits.w0.bad_cksum_cnt_ovfl = 1;
		else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX)
			ipps.bits.w0.pkt_discard_cnt_ovfl = 1;
		cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n",
		    (unsigned long long) ipps.value);
		IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG,
		    ipps.value);
		break;
	}
}
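
/*
 * nxge_ipp_fatal_err_recover
 *
 * Attempt to recover the RX port after a fatal IPP error: clear any
 * injected error source, disable the RxMAC, reset the ZCP CFIFO, wait
 * for the IPP DFIFO read and write pointers to line up, scrub the
 * DFIFO SRAM, clear the status bits, soft-reset the IPP, and then
 * reset, re-initialize and re-enable the RxMAC.
 */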
/* ARGSUSED */
nxge_status_t
nxge_ipp_fatal_err_recover(p_nxge_t nxgep)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;
	uint8_t		portn;
	uint16_t	wr_ptr;
	uint16_t	rd_ptr;
	uint32_t	try_count;
	uint32_t	dfifo_entries;
	ipp_status_t	istatus;
	uint32_t	d0, d1, d2, d3, d4;
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;

	/*
	 * Make sure that the error source is cleared if this is an injected
	 * error.
	 */
	IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0);

	/* Disable RxMAC */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* When recovering from IPP, RxDMA channel resets are not necessary */
	/* Reset ZCP CFIFO */
	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn));
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/*
	 * Wait until the IPP read and write FIFO pointers are equal
	 */
	(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
	(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
	try_count = 512;

	while ((try_count > 0) && (rd_ptr != wr_ptr)) {
		(void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr);
		(void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr);
		try_count--;
	}

	if (try_count == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_ipp_fatal_err_recover: port%d IPP stalled..."
		    " rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x",
		    portn, rd_ptr, wr_ptr));
		/*
		 * This means the fatal error occurred on the first line of
		 * the FIFO. In this case, just reset the IPP without
		 * draining the PFIFO.
		 */
	}

	if ((nxgep->niu_type == NEPTUNE) || (nxgep->niu_type == NEPTUNE_2)) {
		if (portn < 2)
			dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES;
		else
			dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES;
	} else if (nxgep->niu_type == N2_NIU) {
		dfifo_entries = IPP_NIU_DFIFO_ENTRIES;
	} else
		goto fail;

	/* Clean up DFIFO SRAM entries */
	for (i = 0; i < dfifo_entries; i++) {
		if ((rs = npi_ipp_write_dfifo(handle, portn,
		    i, 0, 0, 0, 0, 0)) != NPI_SUCCESS)
			goto fail;
		if ((rs = npi_ipp_read_dfifo(handle, portn, i,
		    &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS)
			goto fail;
	}

	/* Clear PFIFO/DFIFO status bits */
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS)
		goto fail;

	/* Reset IPP */
	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn));
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn));
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn));
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn));
	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | rs);
}
/* ARGSUSED */
/*
 * A hardware bug may cause a spurious ECCUE (ECC uncorrectable error).
 * This function checks whether an ECCUE is real (valid) or not. It is
 * not real if rd_ptr == wr_ptr.
 * The hardware module that has the bug is used not only by the IPP
 * FIFO but also by the ZCP FIFO, therefore this function is also
 * called by nxge_zcp_handle_sys_errors for validating the ZCP FIFO
 * error.
 */
nxge_status_t
nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	uint8_t		portn;
	uint16_t	rd_ptr;
	uint16_t	wr_ptr;
	uint16_t	curr_rd_ptr;
	uint16_t	curr_wr_ptr;
	uint32_t	stall_cnt;
	uint32_t	d0, d1, d2, d3, d4;

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;
	*valid = B_TRUE;

	if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr))
	    != NPI_SUCCESS)
		goto fail;
	if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr))
	    != NPI_SUCCESS)
		goto fail;

	if (rd_ptr == wr_ptr) {
		*valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */
	} else {
		stall_cnt = 0;
		while (stall_cnt < 16) {
			if ((rs = npi_ipp_get_dfifo_rd_ptr(handle,
			    portn, &curr_rd_ptr)) != NPI_SUCCESS)
				goto fail;
			if ((rs = npi_ipp_get_dfifo_wr_ptr(handle,
			    portn, &curr_wr_ptr)) != NPI_SUCCESS)
				goto fail;

			if ((rd_ptr == curr_rd_ptr) && (wr_ptr == curr_wr_ptr))
				stall_cnt++;
			else {
				*valid = B_FALSE;
				break;
			}
		}

		if (*valid) {
			/* Further check to see if the ECC UE is valid */
			if ((rs = npi_ipp_read_dfifo(handle, portn,
			    rd_ptr, &d0, &d1, &d2, &d3,
			    &d4)) != NPI_SUCCESS)
				goto fail;
			if ((d4 & 0x1) == 0) /* Not the 1st line */
				*valid = B_FALSE;
		}
	}
	return (NXGE_OK);
fail:
	return (NXGE_ERROR | rs);
}