1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <nxge_impl.h> 29 #include <nxge_ipp.h> 30 31 #define NXGE_IPP_FIFO_SYNC_TRY_COUNT 100 32 33 /* ARGSUSED */ 34 nxge_status_t 35 nxge_ipp_init(p_nxge_t nxgep) 36 { 37 uint8_t portn; 38 uint32_t config; 39 npi_handle_t handle; 40 uint32_t pkt_size; 41 ipp_status_t istatus; 42 npi_status_t rs = NPI_SUCCESS; 43 uint64_t val; 44 uint32_t d0, d1, d2, d3, d4; 45 int i; 46 uint32_t dfifo_entries; 47 48 handle = nxgep->npi_handle; 49 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 50 51 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_init: port%d", portn)); 52 53 /* Initialize ECC and parity in SRAM of DFIFO and PFIFO */ 54 if (nxgep->niu_type == N2_NIU) { 55 dfifo_entries = IPP_NIU_DFIFO_ENTRIES; 56 } else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 57 if (portn < 2) 58 dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES; 59 else 60 dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES; 61 } else { 62 goto fail; 63 } 64 65 for (i = 0; i < dfifo_entries; i++) { 66 if ((rs = npi_ipp_write_dfifo(handle, 67 portn, i, 0, 0, 0, 0, 0)) != NPI_SUCCESS) 68 goto fail; 69 if ((rs = npi_ipp_read_dfifo(handle, portn, 70 i, &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS) 71 goto fail; 72 } 73 74 /* Clear PFIFO DFIFO status bits */ 75 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 76 goto fail; 77 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 78 goto fail; 79 80 /* 81 * Soft reset to make sure we bring the FIFO pointers back to the 82 * original initial position. 83 */ 84 if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) 85 goto fail; 86 87 /* Clean up ECC counter */ 88 IPP_REG_RD(nxgep->npi_handle, portn, IPP_ECC_ERR_COUNTER_REG, &val); 89 IPP_REG_RD(nxgep->npi_handle, portn, IPP_TCP_CKSUM_ERR_CNT_REG, &val); 90 IPP_REG_RD(nxgep->npi_handle, portn, IPP_DISCARD_PKT_CNT_REG, &val); 91 92 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 93 goto fail; 94 95 /* Configure IPP port */ 96 if ((rs = npi_ipp_iconfig(handle, INIT, portn, ICFG_IPP_ALL)) 97 != NPI_SUCCESS) 98 goto fail; 99 nxgep->ipp.iconfig = ICFG_IPP_ALL; 100 101 config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC | 102 CFG_IPP_TCP_UDP_CKSUM; 103 if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS) 104 goto fail; 105 nxgep->ipp.config = config; 106 107 /* Set max packet size */ 108 pkt_size = IPP_MAX_PKT_SIZE; 109 if ((rs = npi_ipp_set_max_pktsize(handle, portn, 110 IPP_MAX_PKT_SIZE)) != NPI_SUCCESS) 111 goto fail; 112 nxgep->ipp.max_pkt_size = pkt_size; 113 114 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_init: port%d", portn)); 115 116 return (NXGE_OK); 117 fail: 118 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 119 "nxge_ipp_init: Fail to initialize IPP Port #%d\n", 120 portn)); 121 return (NXGE_ERROR | rs); 122 } 123 124 /* ARGSUSED */ 125 nxge_status_t 126 nxge_ipp_disable(p_nxge_t nxgep) 127 { 128 uint8_t portn; 129 uint32_t config; 130 npi_handle_t handle; 131 npi_status_t rs = NPI_SUCCESS; 132 uint16_t wr_ptr, rd_ptr; 133 uint32_t try_count; 134 135 handle = nxgep->npi_handle; 136 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 137 138 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_disable: port%d", portn)); 139 (void) nxge_rx_mac_disable(nxgep); 140 141 /* 142 * Wait until ip read and write fifo pointers are equal 143 */ 144 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 145 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 146 try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT; 147 148 while ((try_count > 0) && (rd_ptr != wr_ptr)) { 149 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 150 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 151 try_count--; 152 } 153 154 if (try_count == 0) { 155 if ((rd_ptr != 0) && (wr_ptr != 1)) { 156 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 157 " nxge_ipp_disable: port%d failed" 158 " rd_fifo != wr_fifo", portn)); 159 goto fail; 160 } 161 } 162 /* disable the IPP */ 163 config = nxgep->ipp.config; 164 if ((rs = npi_ipp_config(handle, DISABLE, 165 portn, config)) != NPI_SUCCESS) 166 goto fail; 167 168 /* IPP soft reset */ 169 if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) 170 goto fail; 171 172 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_disable: port%d", portn)); 173 return (NXGE_OK); 174 fail: 175 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 176 "nxge_ipp_disable: Fail to disable IPP Port #%d\n", portn)); 177 return (NXGE_ERROR | rs); 178 } 179 180 /* ARGSUSED */ 181 nxge_status_t 182 nxge_ipp_reset(p_nxge_t nxgep) 183 { 184 uint8_t portn; 185 uint32_t config; 186 npi_handle_t handle; 187 npi_status_t rs = NPI_SUCCESS; 188 uint16_t wr_ptr, rd_ptr; 189 uint32_t try_count; 190 191 handle = nxgep->npi_handle; 192 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 193 194 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_reset: port%d", portn)); 195 196 /* disable the IPP */ 197 config = nxgep->ipp.config; 198 if ((rs = npi_ipp_config(handle, DISABLE, 199 portn, config)) != NPI_SUCCESS) 200 goto fail; 201 202 /* 203 * Wait until ip read and write fifo pointers are equal 204 */ 205 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 206 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 207 try_count = NXGE_IPP_FIFO_SYNC_TRY_COUNT; 208 209 while ((try_count > 0) && (rd_ptr != wr_ptr)) { 210 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 211 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 212 try_count--; 213 } 214 215 if (try_count == 0) { 216 if ((rd_ptr != 0) && (wr_ptr != 1)) { 217 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 218 " nxge_ipp_disable: port%d failed" 219 " rd_fifo != wr_fifo", portn)); 220 goto fail; 221 } 222 } 223 224 /* IPP soft reset */ 225 if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) { 226 goto fail; 227 } 228 229 /* to reset control FIFO */ 230 if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS) 231 goto fail; 232 233 /* 234 * Making sure that error source is cleared if this is an injected 235 * error. 236 */ 237 IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0); 238 239 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_reset: port%d", portn)); 240 return (NXGE_OK); 241 fail: 242 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 243 "nxge_ipp_init: Fail to Reset IPP Port #%d\n", 244 portn)); 245 return (NXGE_ERROR | rs); 246 } 247 248 /* ARGSUSED */ 249 nxge_status_t 250 nxge_ipp_enable(p_nxge_t nxgep) 251 { 252 uint8_t portn; 253 uint32_t config; 254 npi_handle_t handle; 255 uint32_t pkt_size; 256 npi_status_t rs = NPI_SUCCESS; 257 258 handle = nxgep->npi_handle; 259 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 260 261 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "==> nxge_ipp_enable: port%d", portn)); 262 263 config = CFG_IPP | CFG_IPP_DFIFO_ECC_CORRECT | CFG_IPP_DROP_BAD_CRC | 264 CFG_IPP_TCP_UDP_CKSUM; 265 if ((rs = npi_ipp_config(handle, INIT, portn, config)) != NPI_SUCCESS) 266 goto fail; 267 nxgep->ipp.config = config; 268 269 /* Set max packet size */ 270 pkt_size = IPP_MAX_PKT_SIZE; 271 if ((rs = npi_ipp_set_max_pktsize(handle, portn, 272 IPP_MAX_PKT_SIZE)) != NPI_SUCCESS) 273 goto fail; 274 nxgep->ipp.max_pkt_size = pkt_size; 275 276 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "<== nxge_ipp_enable: port%d", portn)); 277 return (NXGE_OK); 278 fail: 279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 280 "nxge_ipp_init: Fail to Enable IPP Port #%d\n", portn)); 281 return (NXGE_ERROR | rs); 282 } 283 284 /* ARGSUSED */ 285 nxge_status_t 286 nxge_ipp_handle_sys_errors(p_nxge_t nxgep) 287 { 288 npi_handle_t handle; 289 npi_status_t rs = NPI_SUCCESS; 290 p_nxge_ipp_stats_t statsp; 291 ipp_status_t istatus; 292 uint8_t portn; 293 p_ipp_errlog_t errlogp; 294 boolean_t rxport_fatal = B_FALSE; 295 nxge_status_t status = NXGE_OK; 296 uint8_t cnt8; 297 uint16_t cnt16; 298 299 handle = nxgep->npi_handle; 300 statsp = (p_nxge_ipp_stats_t)&nxgep->statsp->ipp_stats; 301 portn = nxgep->mac.portnum; 302 303 errlogp = (p_ipp_errlog_t)&statsp->errlog; 304 305 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 306 return (NXGE_ERROR | rs); 307 308 if (istatus.value == 0) { 309 /* 310 * The error is not initiated from this port, so just exit. 311 */ 312 return (NXGE_OK); 313 } 314 315 if (istatus.bits.w0.dfifo_missed_sop) { 316 statsp->sop_miss++; 317 if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn, 318 &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS) 319 return (NXGE_ERROR | rs); 320 if ((rs = npi_ipp_get_state_mach(handle, portn, 321 &errlogp->state_mach)) != NPI_SUCCESS) 322 return (NXGE_ERROR | rs); 323 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 324 NXGE_FM_EREPORT_IPP_SOP_MISS); 325 if (statsp->sop_miss < IPP_MAX_ERR_SHOW) 326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 327 "nxge_ipp_err_evnts: fatal error: sop_miss\n")); 328 rxport_fatal = B_TRUE; 329 } 330 if (istatus.bits.w0.dfifo_missed_eop) { 331 statsp->eop_miss++; 332 if ((rs = npi_ipp_get_dfifo_eopm_rdptr(handle, portn, 333 &errlogp->dfifo_rd_ptr)) != NPI_SUCCESS) 334 return (NXGE_ERROR | rs); 335 if ((rs = npi_ipp_get_state_mach(handle, portn, 336 &errlogp->state_mach)) != NPI_SUCCESS) 337 return (NXGE_ERROR | rs); 338 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 339 NXGE_FM_EREPORT_IPP_EOP_MISS); 340 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 341 "nxge_ipp_err_evnts: fatal error: eop_miss\n")); 342 rxport_fatal = B_TRUE; 343 } 344 if (istatus.bits.w0.dfifo_uncorr_ecc_err) { 345 boolean_t ue_ecc_valid; 346 347 if ((status = nxge_ipp_eccue_valid_check(nxgep, 348 &ue_ecc_valid)) != NXGE_OK) 349 return (status); 350 351 if (ue_ecc_valid) { 352 statsp->dfifo_ue++; 353 if ((rs = npi_ipp_get_ecc_syndrome(handle, portn, 354 &errlogp->ecc_syndrome)) != NPI_SUCCESS) 355 return (NXGE_ERROR | rs); 356 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 357 NXGE_FM_EREPORT_IPP_DFIFO_UE); 358 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 359 "nxge_ipp_err_evnts: fatal error: dfifo_ue\n")); 360 rxport_fatal = B_TRUE; 361 } 362 } 363 if (istatus.bits.w0.pre_fifo_perr) { 364 statsp->pfifo_perr++; 365 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 366 NXGE_FM_EREPORT_IPP_PFIFO_PERR); 367 if (statsp->pfifo_perr < IPP_MAX_ERR_SHOW) 368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 369 "nxge_ipp_err_evnts: " 370 "fatal error: pre_pifo_perr\n")); 371 rxport_fatal = B_TRUE; 372 } 373 if (istatus.bits.w0.pre_fifo_overrun) { 374 statsp->pfifo_over++; 375 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 376 NXGE_FM_EREPORT_IPP_PFIFO_OVER); 377 if (statsp->pfifo_over < IPP_MAX_ERR_SHOW) 378 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 379 "nxge_ipp_err_evnts: " 380 "fatal error: pfifo_over\n")); 381 rxport_fatal = B_TRUE; 382 } 383 if (istatus.bits.w0.pre_fifo_underrun) { 384 statsp->pfifo_und++; 385 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 386 NXGE_FM_EREPORT_IPP_PFIFO_UND); 387 if (statsp->pfifo_und < IPP_MAX_ERR_SHOW) 388 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 389 "nxge_ipp_err_evnts: " 390 "fatal error: pfifo_und\n")); 391 rxport_fatal = B_TRUE; 392 } 393 if (istatus.bits.w0.bad_cksum_cnt_ovfl) { 394 /* Clear the IPP_BAD_CS_CNT counter by reading it */ 395 (void) npi_ipp_get_cs_err_count(handle, portn, &cnt16); 396 statsp->bad_cs_cnt += IPP_BAD_CS_CNT_MASK; 397 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 398 NXGE_FM_EREPORT_IPP_BAD_CS_MX); 399 if (statsp->bad_cs_cnt < (IPP_MAX_ERR_SHOW * 400 IPP_BAD_CS_CNT_MASK)) 401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 402 "nxge_ipp_err_evnts: bad_cs_max\n")); 403 } 404 if (istatus.bits.w0.pkt_discard_cnt_ovfl) { 405 /* Clear the IPP_PKT_DIS counter by reading it */ 406 (void) npi_ipp_get_pkt_dis_count(handle, portn, &cnt16); 407 statsp->pkt_dis_cnt += IPP_PKT_DIS_CNT_MASK; 408 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 409 NXGE_FM_EREPORT_IPP_PKT_DIS_MX); 410 if (statsp->pkt_dis_cnt < (IPP_MAX_ERR_SHOW * 411 IPP_PKT_DIS_CNT_MASK)) 412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 413 "nxge_ipp_err_evnts: pkt_dis_max\n")); 414 } 415 if (istatus.bits.w0.ecc_err_cnt_ovfl) { 416 /* Clear the IPP_ECC counter by reading it */ 417 (void) npi_ipp_get_ecc_err_count(handle, portn, &cnt8); 418 statsp->ecc_err_cnt += IPP_ECC_CNT_MASK; 419 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 420 NXGE_FM_EREPORT_IPP_ECC_ERR_MAX); 421 if (statsp->ecc_err_cnt < (IPP_MAX_ERR_SHOW * 422 IPP_ECC_CNT_MASK)) 423 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 424 "nxge_ipp_err_evnts: pkt_ecc_err_max\n")); 425 } 426 /* 427 * Making sure that error source is cleared if this is an injected 428 * error. 429 */ 430 IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0); 431 432 if (rxport_fatal) { 433 NXGE_DEBUG_MSG((nxgep, IPP_CTL, 434 " nxge_ipp_handle_sys_errors:" 435 " fatal Error on Port #%d\n", portn)); 436 status = nxge_ipp_fatal_err_recover(nxgep); 437 if (status == NXGE_OK) { 438 FM_SERVICE_RESTORED(nxgep); 439 } 440 } 441 return (status); 442 } 443 444 /* ARGSUSED */ 445 void 446 nxge_ipp_inject_err(p_nxge_t nxgep, uint32_t err_id) 447 { 448 ipp_status_t ipps; 449 ipp_ecc_ctrl_t ecc_ctrl; 450 uint8_t portn = nxgep->mac.portnum; 451 452 switch (err_id) { 453 case NXGE_FM_EREPORT_IPP_DFIFO_UE: 454 ecc_ctrl.value = 0; 455 ecc_ctrl.bits.w0.cor_dbl = 1; 456 ecc_ctrl.bits.w0.cor_1 = 1; 457 ecc_ctrl.bits.w0.cor_lst = 1; 458 cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n", 459 (unsigned long long) ecc_ctrl.value); 460 IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG, 461 ecc_ctrl.value); 462 break; 463 464 case NXGE_FM_EREPORT_IPP_DFIFO_CE: 465 ecc_ctrl.value = 0; 466 ecc_ctrl.bits.w0.cor_sng = 1; 467 ecc_ctrl.bits.w0.cor_1 = 1; 468 ecc_ctrl.bits.w0.cor_snd = 1; 469 cmn_err(CE_NOTE, "!Write 0x%llx to IPP_ECC_CTRL_REG\n", 470 (unsigned long long) ecc_ctrl.value); 471 IPP_REG_WR(nxgep->npi_handle, portn, IPP_ECC_CTRL_REG, 472 ecc_ctrl.value); 473 break; 474 475 case NXGE_FM_EREPORT_IPP_EOP_MISS: 476 case NXGE_FM_EREPORT_IPP_SOP_MISS: 477 case NXGE_FM_EREPORT_IPP_PFIFO_PERR: 478 case NXGE_FM_EREPORT_IPP_ECC_ERR_MAX: 479 case NXGE_FM_EREPORT_IPP_PFIFO_OVER: 480 case NXGE_FM_EREPORT_IPP_PFIFO_UND: 481 case NXGE_FM_EREPORT_IPP_BAD_CS_MX: 482 case NXGE_FM_EREPORT_IPP_PKT_DIS_MX: 483 case NXGE_FM_EREPORT_IPP_RESET_FAIL: 484 IPP_REG_RD(nxgep->npi_handle, portn, IPP_INT_STATUS_REG, 485 &ipps.value); 486 if (err_id == NXGE_FM_EREPORT_IPP_EOP_MISS) 487 ipps.bits.w0.dfifo_missed_eop = 1; 488 else if (err_id == NXGE_FM_EREPORT_IPP_SOP_MISS) 489 ipps.bits.w0.dfifo_missed_sop = 1; 490 else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_UE) 491 ipps.bits.w0.dfifo_uncorr_ecc_err = 1; 492 else if (err_id == NXGE_FM_EREPORT_IPP_DFIFO_CE) 493 ipps.bits.w0.dfifo_corr_ecc_err = 1; 494 else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_PERR) 495 ipps.bits.w0.pre_fifo_perr = 1; 496 else if (err_id == NXGE_FM_EREPORT_IPP_ECC_ERR_MAX) 497 ipps.bits.w0.ecc_err_cnt_ovfl = 1; 498 else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_OVER) 499 ipps.bits.w0.pre_fifo_overrun = 1; 500 else if (err_id == NXGE_FM_EREPORT_IPP_PFIFO_UND) 501 ipps.bits.w0.pre_fifo_underrun = 1; 502 else if (err_id == NXGE_FM_EREPORT_IPP_BAD_CS_MX) 503 ipps.bits.w0.bad_cksum_cnt_ovfl = 1; 504 else if (err_id == NXGE_FM_EREPORT_IPP_PKT_DIS_MX) 505 ipps.bits.w0.pkt_discard_cnt_ovfl = 1; 506 cmn_err(CE_NOTE, "!Write 0x%llx to IPP_INT_STATUS_REG\n", 507 (unsigned long long) ipps.value); 508 IPP_REG_WR(nxgep->npi_handle, portn, IPP_INT_STATUS_REG, 509 ipps.value); 510 break; 511 } 512 } 513 514 /* ARGSUSED */ 515 nxge_status_t 516 nxge_ipp_fatal_err_recover(p_nxge_t nxgep) 517 { 518 npi_handle_t handle; 519 npi_status_t rs = NPI_SUCCESS; 520 nxge_status_t status = NXGE_OK; 521 uint8_t portn; 522 uint16_t wr_ptr; 523 uint16_t rd_ptr; 524 uint32_t try_count; 525 uint32_t dfifo_entries; 526 ipp_status_t istatus; 527 uint32_t d0, d1, d2, d3, d4; 528 int i; 529 530 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_ipp_fatal_err_recover")); 531 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 532 "Recovering from RxPort error...")); 533 534 handle = nxgep->npi_handle; 535 portn = nxgep->mac.portnum; 536 537 /* 538 * Making sure that error source is cleared if this is an injected 539 * error. 540 */ 541 IPP_REG_WR(handle, portn, IPP_ECC_CTRL_REG, 0); 542 543 /* Disable RxMAC */ 544 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 545 goto fail; 546 547 /* When recovering from IPP, RxDMA channel resets are not necessary */ 548 /* Reset ZCP CFIFO */ 549 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset ZCP CFIFO...", portn)); 550 if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS) 551 goto fail; 552 553 /* 554 * Wait until ip read and write fifo pointers are equal 555 */ 556 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 557 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 558 try_count = 512; 559 560 while ((try_count > 0) && (rd_ptr != wr_ptr)) { 561 (void) npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr); 562 (void) npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr); 563 try_count--; 564 } 565 566 if (try_count == 0) { 567 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 568 " nxge_ipp_reset: port%d IPP stalled..." 569 " rd_fifo_ptr = 0x%x wr_fifo_ptr = 0x%x", 570 portn, rd_ptr, wr_ptr)); 571 /* 572 * This means the fatal error occurred on the first line of the 573 * fifo. In this case, just reset the IPP without draining the 574 * PFIFO. 575 */ 576 } 577 578 if (nxgep->niu_type == N2_NIU) { 579 dfifo_entries = IPP_NIU_DFIFO_ENTRIES; 580 } else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 581 if (portn < 2) 582 dfifo_entries = IPP_P0_P1_DFIFO_ENTRIES; 583 else 584 dfifo_entries = IPP_P2_P3_DFIFO_ENTRIES; 585 } else { 586 goto fail; 587 } 588 589 /* Clean up DFIFO SRAM entries */ 590 for (i = 0; i < dfifo_entries; i++) { 591 if ((rs = npi_ipp_write_dfifo(handle, portn, 592 i, 0, 0, 0, 0, 0)) != NPI_SUCCESS) 593 goto fail; 594 if ((rs = npi_ipp_read_dfifo(handle, portn, i, 595 &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS) 596 goto fail; 597 } 598 599 /* Clear PFIFO DFIFO status bits */ 600 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 601 goto fail; 602 if ((rs = npi_ipp_get_status(handle, portn, &istatus)) != NPI_SUCCESS) 603 goto fail; 604 605 /* Reset IPP */ 606 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset IPP...", portn)); 607 if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS) 608 goto fail; 609 610 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Reset RxMAC...", portn)); 611 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) 612 goto fail; 613 614 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Initialize RxMAC...", portn)); 615 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) 616 goto fail; 617 618 NXGE_DEBUG_MSG((nxgep, IPP_CTL, "port%d Enable RxMAC...", portn)); 619 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) 620 goto fail; 621 622 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 623 "Recovery Sucessful, RxPort Restored")); 624 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_ipp_fatal_err_recover")); 625 626 return (NXGE_OK); 627 fail: 628 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 629 return (status | rs); 630 } 631 632 /* ARGSUSED */ 633 /* 634 * A hardware bug may cause fake ECCUEs (ECC Uncorrectable Error). 635 * This function checks if a ECCUE is real(valid) or not. It is not 636 * real if rd_ptr == wr_ptr. 637 * The hardware module that has the bug is used not only by the IPP 638 * FIFO but also by the ZCP FIFO, therefore this function is also 639 * called by nxge_zcp_handle_sys_errors for validating the ZCP FIFO 640 * error. 641 */ 642 nxge_status_t 643 nxge_ipp_eccue_valid_check(p_nxge_t nxgep, boolean_t *valid) 644 { 645 npi_handle_t handle; 646 npi_status_t rs = NPI_SUCCESS; 647 uint8_t portn; 648 uint16_t rd_ptr; 649 uint16_t wr_ptr; 650 uint16_t curr_rd_ptr; 651 uint16_t curr_wr_ptr; 652 uint32_t stall_cnt; 653 uint32_t d0, d1, d2, d3, d4; 654 655 handle = nxgep->npi_handle; 656 portn = nxgep->mac.portnum; 657 *valid = B_TRUE; 658 659 if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, portn, &rd_ptr)) 660 != NPI_SUCCESS) 661 goto fail; 662 if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, portn, &wr_ptr)) 663 != NPI_SUCCESS) 664 goto fail; 665 666 if (rd_ptr == wr_ptr) { 667 *valid = B_FALSE; /* FIFO not stuck, so it's not a real ECCUE */ 668 } else { 669 stall_cnt = 0; 670 /* 671 * Check if the two pointers are moving, the ECCUE is invali 672 * if either pointer is moving, which indicates that the FIFO 673 * is functional. 674 */ 675 while (stall_cnt < 16) { 676 if ((rs = npi_ipp_get_dfifo_rd_ptr(handle, 677 portn, &curr_rd_ptr)) != NPI_SUCCESS) 678 goto fail; 679 if ((rs = npi_ipp_get_dfifo_wr_ptr(handle, 680 portn, &curr_wr_ptr)) != NPI_SUCCESS) 681 goto fail; 682 683 if (rd_ptr == curr_rd_ptr && wr_ptr == curr_wr_ptr) { 684 stall_cnt++; 685 } else { 686 *valid = B_FALSE; 687 break; 688 } 689 } 690 691 if (valid) { 692 /* 693 * Futher check to see if the ECCUE is valid. The 694 * error is real if the LSB of d4 is 1, which 695 * indicates that the data that has set the ECC 696 * error flag is the 16-byte internal control word. 697 */ 698 if ((rs = npi_ipp_read_dfifo(handle, portn, rd_ptr, 699 &d0, &d1, &d2, &d3, &d4)) != NPI_SUCCESS) 700 goto fail; 701 if ((d4 & 0x1) == 0) /* Not the 1st line */ 702 *valid = B_FALSE; 703 } 704 } 705 return (NXGE_OK); 706 fail: 707 return (NXGE_ERROR | rs); 708 } 709