/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <nxge_impl.h>
#include <nxge_zcp.h>
#include <nxge_ipp.h>

nxge_status_t
nxge_zcp_init(p_nxge_t nxgep)
{
	uint8_t portn;
	npi_handle_t handle;
	zcp_iconfig_t istatus;
	npi_status_t rs = NPI_SUCCESS;
	int i;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;

	handle = nxgep->npi_handle;
	portn = NXGE_GET_PORT_NUM(nxgep->function_num);

	if (nxgep->niu_type == N2_NIU) {
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else {
		goto fail;
	}

	/* Clean up CFIFO */
	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
		    portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
		    portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	if (npi_zcp_rest_cfifo_port(handle, portn) != NPI_SUCCESS)
		goto fail;

	/*
	 * Make sure the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	if ((rs = npi_zcp_clear_istatus(handle)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);
	if ((rs = npi_zcp_iconfig(handle, INIT, ICFG_ZCP_ALL)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_init: port%d", portn));
	return (NXGE_OK);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "nxge_zcp_init: Failed to initialize ZCP Port #%d\n", portn));
	return (NXGE_ERROR | rs);
}

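/*
 * nxge_zcp_handle_sys_errors
 *
 * Read the ZCP interrupt status and, for each error bit that is set, bump
 * the matching kstat counter and post an FMA ereport.  A buffer overflow or
 * a confirmed uncorrectable CFIFO ECC error is fatal to the RX port and
 * triggers nxge_zcp_fatal_err_recover().  The status register is cleared
 * before returning.
 */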
nxge_status_t
nxge_zcp_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_zcp_stats_t statsp;
	uint8_t portn;
	zcp_iconfig_t istatus;
	boolean_t rxport_fatal = B_FALSE;
	nxge_status_t status = NXGE_OK;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_zcp_stats_t)&nxgep->statsp->zcp_stats;
	portn = nxgep->mac.portnum;

	if ((rs = npi_zcp_get_istatus(handle, &istatus)) != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (istatus & ICFG_ZCP_RRFIFO_UNDERRUN) {
		statsp->rrfifo_underrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rrfifo_underrun"));
	}

	if (istatus & ICFG_ZCP_RRFIFO_OVERRUN) {
		statsp->rrfifo_overrun++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buf_rrfifo_overrun"));
	}

	if (istatus & ICFG_ZCP_RSPFIFO_UNCORR_ERR) {
		statsp->rspfifo_uncorr_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rspfifo_uncorr_err"));
	}

	if (istatus & ICFG_ZCP_BUFFER_OVERFLOW) {
		statsp->buffer_overflow++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buffer_overflow"));
		rxport_fatal = B_TRUE;
	}

	if (istatus & ICFG_ZCP_STAT_TBL_PERR) {
		statsp->stat_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: stat_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_DYN_TBL_PERR) {
		statsp->dyn_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: dyn_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_BUF_TBL_PERR) {
		statsp->buf_tbl_perr++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: buf_tbl_perr"));
	}

	if (istatus & ICFG_ZCP_TT_PROGRAM_ERR) {
		statsp->tt_program_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: tt_program_err"));
	}

	if (istatus & ICFG_ZCP_RSP_TT_INDEX_ERR) {
		statsp->rsp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: rsp_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_SLV_TT_INDEX_ERR) {
		statsp->slv_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: slv_tt_index_err"));
	}

	if (istatus & ICFG_ZCP_TT_INDEX_ERR) {
		statsp->zcp_tt_index_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_zcp_err_evnts: tt_index_err"));
	}

	if (((portn == 0) && (istatus & ICFG_ZCP_CFIFO_ECC0)) ||
	    ((portn == 1) && (istatus & ICFG_ZCP_CFIFO_ECC1)) ||
	    ((portn == 2) && (istatus & ICFG_ZCP_CFIFO_ECC2)) ||
	    ((portn == 3) && (istatus & ICFG_ZCP_CFIFO_ECC3))) {
		boolean_t ue_ecc_valid;

		if ((status = nxge_ipp_eccue_valid_check(nxgep,
		    &ue_ecc_valid)) != NXGE_OK)
			return (status);

		if (ue_ecc_valid) {
			statsp->cfifo_ecc++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
			    NXGE_FM_EREPORT_ZCP_CFIFO_ECC);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_zcp_err_evnts: port%d buf_cfifo_ecc",
			    portn));
			rxport_fatal = B_TRUE;
		}
	}

	/*
	 * Make sure the error source is cleared if this is an injected
	 * error.
	 */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	(void) npi_zcp_clear_istatus(handle);

	if (rxport_fatal) {
		NXGE_DEBUG_MSG((nxgep, IPP_CTL,
		    " nxge_zcp_handle_sys_errors:"
		    " fatal Error on Port #%d\n", portn));
		status = nxge_zcp_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
	return (status);
}

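/*
 * nxge_zcp_inject_err
 *
 * Inject a ZCP error for testing.  A CFIFO ECC error is injected by setting
 * the cor_dbl and cor_lst bits in the per-port ZCP_CFIFO_ECC_PORT<n>_REG;
 * all other error IDs are injected by setting the corresponding bit in
 * ZCP_INT_STAT_TEST_REG.
 */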
void
nxge_zcp_inject_err(p_nxge_t nxgep, uint32_t err_id)
{
	zcp_int_stat_reg_t zcps;
	uint8_t portn = nxgep->mac.portnum;
	zcp_ecc_ctrl_t ecc_ctrl;

	switch (err_id) {
	case NXGE_FM_EREPORT_ZCP_CFIFO_ECC:
		ecc_ctrl.value = 0;
		ecc_ctrl.bits.w0.cor_dbl = 1;
		ecc_ctrl.bits.w0.cor_lst = 1;
		ecc_ctrl.bits.w0.cor_all = 0;
		switch (portn) {
		case 0:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT0_REG,
			    ecc_ctrl.value);
			break;
		case 1:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT1_REG,
			    ecc_ctrl.value);
			break;
		case 2:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT2_REG,
			    ecc_ctrl.value);
			break;
		case 3:
			cmn_err(CE_NOTE,
			    "!Write 0x%llx to port%d ZCP_CFIFO_ECC_PORT\n",
			    (unsigned long long) ecc_ctrl.value, portn);
			NXGE_REG_WR64(nxgep->npi_handle,
			    ZCP_CFIFO_ECC_PORT3_REG,
			    ecc_ctrl.value);
			break;
		}
		break;

	case NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN:
	case NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR:
	case NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR:
	case NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN:
	case NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW:
	case NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR:
	case NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR:
	case NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR:
		NXGE_REG_RD64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
		    &zcps.value);
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_UNDERRUN)
			zcps.bits.ldw.rrfifo_urun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSPFIFO_UNCORR_ERR)
			zcps.bits.ldw.rspfifo_uc_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_STAT_TBL_PERR)
			zcps.bits.ldw.stat_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_DYN_TBL_PERR)
			zcps.bits.ldw.dyn_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUF_TBL_PERR)
			zcps.bits.ldw.buf_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_CFIFO_ECC) {
			switch (portn) {
			case 0:
				zcps.bits.ldw.cfifo_ecc0 = 1;
				break;
			case 1:
				zcps.bits.ldw.cfifo_ecc1 = 1;
				break;
			case 2:
				zcps.bits.ldw.cfifo_ecc2 = 1;
				break;
			case 3:
				zcps.bits.ldw.cfifo_ecc3 = 1;
				break;
			}
		}
		/* FALLTHROUGH */

	default:
		if (err_id == NXGE_FM_EREPORT_ZCP_RRFIFO_OVERRUN)
			zcps.bits.ldw.rrfifo_orun = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_BUFFER_OVERFLOW)
			zcps.bits.ldw.buf_overflow = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_PROGRAM_ERR)
			zcps.bits.ldw.tt_tbl_perr = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_RSP_TT_INDEX_ERR)
			zcps.bits.ldw.rsp_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_SLV_TT_INDEX_ERR)
			zcps.bits.ldw.slv_tt_index_err = 1;
		if (err_id == NXGE_FM_EREPORT_ZCP_TT_INDEX_ERR)
			zcps.bits.ldw.zcp_tt_index_err = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to ZCP_INT_STAT_TEST_REG\n",
		    zcps.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to ZCP_INT_STAT_TEST_REG\n",
		    zcps.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle, ZCP_INT_STAT_TEST_REG,
		    zcps.value);
		break;
	}
}

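/*
 * nxge_zcp_fatal_err_recover
 *
 * Recover the RX port after a fatal ZCP error: disable the RxMAC, clear any
 * injected error source, scrub and reset the ZCP CFIFO, reset the IPP, then
 * reset, reinitialize and re-enable the RxMAC.
 */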
nxge_status_t
nxge_zcp_fatal_err_recover(p_nxge_t nxgep)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	uint8_t portn;
	zcp_ram_unit_t w_data;
	zcp_ram_unit_t r_data;
	uint32_t cfifo_depth;
	int i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_zcp_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));

	handle = nxgep->npi_handle;
	portn = nxgep->mac.portnum;

	/* Disable RxMAC */
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Make sure the source is cleared if this is an injected error */
	switch (portn) {
	case 0:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT0_REG, 0);
		break;
	case 1:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT1_REG, 0);
		break;
	case 2:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT2_REG, 0);
		break;
	case 3:
		NXGE_REG_WR64(handle, ZCP_CFIFO_ECC_PORT3_REG, 0);
		break;
	}

	/* Clear up CFIFO */
	if (nxgep->niu_type == N2_NIU) {
		cfifo_depth = ZCP_NIU_CFIFO_DEPTH;
	} else if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		if (portn < 2)
			cfifo_depth = ZCP_P0_P1_CFIFO_DEPTH;
		else
			cfifo_depth = ZCP_P2_P3_CFIFO_DEPTH;
	} else {
		goto fail;
	}

	w_data.w0 = 0;
	w_data.w1 = 0;
	w_data.w2 = 0;
	w_data.w3 = 0;
	w_data.w4 = 0;

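	/* Write zeros to each CFIFO entry and read every entry back */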
	for (i = 0; i < cfifo_depth; i++) {
		if (npi_zcp_tt_cfifo_entry(handle, OP_SET,
		    portn, i, &w_data) != NPI_SUCCESS)
			goto fail;
		if (npi_zcp_tt_cfifo_entry(handle, OP_GET,
		    portn, i, &r_data) != NPI_SUCCESS)
			goto fail;
	}

	/* When recovering from ZCP, RxDMA channel resets are not necessary */
	/* Reset ZCP CFIFO */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset ZCP CFIFO...", portn));
	if ((rs = npi_zcp_rest_cfifo_port(handle, portn)) != NPI_SUCCESS)
		goto fail;

	/* Reset IPP */
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset IPP...", portn));
	if ((rs = npi_ipp_reset(handle, portn)) != NPI_SUCCESS)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Reset RxMAC...", portn));
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Initialize RxMAC...", portn));
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK)
		goto fail;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "port%d Enable RxMAC...", portn));
	if (nxge_rx_mac_enable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_zcp_fatal_err_recover"));
	return (NXGE_OK);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status | rs);
}