1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_txdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 #include <npi_tx_rd64.h> 30 #include <npi_tx_wr64.h> 31 #include <sys/llc1.h> 32 33 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 34 uint32_t nxge_tx_minfree = 32; 35 uint32_t nxge_tx_intr_thres = 0; 36 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 37 uint32_t nxge_tx_tiny_pack = 1; 38 uint32_t nxge_tx_use_bcopy = 1; 39 40 extern uint32_t nxge_tx_ring_size; 41 extern uint32_t nxge_bcopy_thresh; 42 extern uint32_t nxge_dvma_thresh; 43 extern uint32_t nxge_dma_stream_thresh; 44 extern dma_method_t nxge_force_dma; 45 extern uint32_t nxge_cksum_offload; 46 47 /* Device register access attributes for PIO. */ 48 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 49 /* Device descriptor access attributes for DMA. */ 50 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 51 /* Device buffer access attributes for DMA. 
*/ 52 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 53 extern ddi_dma_attr_t nxge_desc_dma_attr; 54 extern ddi_dma_attr_t nxge_tx_dma_attr; 55 56 extern int nxge_serial_tx(mblk_t *mp, void *arg); 57 58 static nxge_status_t nxge_map_txdma(p_nxge_t, int); 59 60 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 61 62 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 63 p_nxge_dma_common_t *, p_tx_ring_t *, 64 uint32_t, p_nxge_dma_common_t *, 65 p_tx_mbox_t *); 66 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 67 68 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 69 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 70 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 71 72 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 73 p_nxge_dma_common_t *, p_tx_ring_t, 74 p_tx_mbox_t *); 75 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 76 p_tx_ring_t, p_tx_mbox_t); 77 78 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 79 p_tx_ring_t, p_tx_mbox_t); 80 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 81 82 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 83 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 84 p_nxge_ldv_t, tx_cs_t); 85 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 86 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 87 uint16_t, p_tx_ring_t); 88 89 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 90 p_tx_ring_t ring_p, uint16_t channel); 91 92 nxge_status_t 93 nxge_init_txdma_channels(p_nxge_t nxgep) 94 { 95 nxge_grp_set_t *set = &nxgep->tx_set; 96 int i, count; 97 98 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 99 100 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 101 if ((1 << i) & set->lg.map) { 102 int tdc; 103 nxge_grp_t *group = set->group[i]; 104 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 105 if ((1 << tdc) & group->map) { 106 if ((nxge_grp_dc_add(nxgep, 107 group, VP_BOUND_TX, tdc))) 108 return (NXGE_ERROR); 109 } 110 } 111 } 112 if (++count == set->lg.count) 113 break; 114 } 115 116 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 117 118 return (NXGE_OK); 119 } 120 121 nxge_status_t 122 nxge_init_txdma_channel( 123 p_nxge_t nxge, 124 int channel) 125 { 126 nxge_status_t status; 127 128 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 129 130 status = nxge_map_txdma(nxge, channel); 131 if (status != NXGE_OK) { 132 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 133 "<== nxge_init_txdma_channel: status 0x%x", status)); 134 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 135 return (status); 136 } 137 138 status = nxge_txdma_hw_start(nxge, channel); 139 if (status != NXGE_OK) { 140 (void) nxge_unmap_txdma_channel(nxge, channel); 141 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 142 return (status); 143 } 144 145 if (!nxge->statsp->tdc_ksp[channel]) 146 nxge_setup_tdc_kstats(nxge, channel); 147 148 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 149 150 return (status); 151 } 152 153 void 154 nxge_uninit_txdma_channels(p_nxge_t nxgep) 155 { 156 nxge_grp_set_t *set = &nxgep->tx_set; 157 int tdc; 158 159 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 160 161 if (set->owned.map == 0) { 162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 163 "nxge_uninit_txdma_channels: no channels")); 164 return; 165 } 166 167 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 168 if ((1 << tdc) & set->owned.map) 
{
			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

	if (nxgep->statsp->tdc_ksp[channel]) {
		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
		nxgep->statsp->tdc_ksp[channel] = 0;
	}

	(void) nxge_txdma_stop_channel(nxgep, channel);
	nxge_unmap_txdma_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channel"));
}

/*
 * Carve a block of <entries> sub-blocks, each <size> bytes long, out of the
 * common DMA area *src_p: describe the block in *dest_p and advance *src_p
 * past it.
 */
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t tsize;

	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *	Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018	Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it; a fatal Tx overflow
	 * error results if the tail is not 0 after the reset.)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *	Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable interrupts for.
 * 	mask_p		The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020	Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Set a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel whose TX_CS register is to be set.
 * 	reg_data	The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *	Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000	Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030	TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038	TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
			return (NXGE_ERROR);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine.
*/ 413 rs = npi_txdma_channel_init_enable(handle, channel); 414 415 if (rs != NPI_SUCCESS) { 416 return (NXGE_ERROR | rs); 417 } 418 419 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 420 421 return (status); 422 } 423 424 void 425 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 426 boolean_t l4_cksum, int pkt_len, uint8_t npads, 427 p_tx_pkt_hdr_all_t pkthdrp, 428 t_uscalar_t start_offset, 429 t_uscalar_t stuff_offset) 430 { 431 p_tx_pkt_header_t hdrp; 432 p_mblk_t nmp; 433 uint64_t tmp; 434 size_t mblk_len; 435 size_t iph_len; 436 size_t hdrs_size; 437 uint8_t hdrs_buf[sizeof (struct ether_header) + 438 64 + sizeof (uint32_t)]; 439 uint8_t *cursor; 440 uint8_t *ip_buf; 441 uint16_t eth_type; 442 uint8_t ipproto; 443 boolean_t is_vlan = B_FALSE; 444 size_t eth_hdr_size; 445 446 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 447 448 /* 449 * Caller should zero out the headers first. 450 */ 451 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 452 453 if (fill_len) { 454 NXGE_DEBUG_MSG((NULL, TX_CTL, 455 "==> nxge_fill_tx_hdr: pkt_len %d " 456 "npads %d", pkt_len, npads)); 457 tmp = (uint64_t)pkt_len; 458 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 459 goto fill_tx_header_done; 460 } 461 462 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 463 464 /* 465 * mp is the original data packet (does not include the 466 * Neptune transmit header). 467 */ 468 nmp = mp; 469 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 470 "mp $%p b_rptr $%p len %d", 471 mp, nmp->b_rptr, MBLKL(nmp))); 472 /* copy ether_header from mblk to hdrs_buf */ 473 cursor = &hdrs_buf[0]; 474 tmp = sizeof (struct ether_vlan_header); 475 while ((nmp != NULL) && (tmp > 0)) { 476 size_t buflen; 477 mblk_len = MBLKL(nmp); 478 buflen = min((size_t)tmp, mblk_len); 479 bcopy(nmp->b_rptr, cursor, buflen); 480 cursor += buflen; 481 tmp -= buflen; 482 nmp = nmp->b_cont; 483 } 484 485 nmp = mp; 486 mblk_len = MBLKL(nmp); 487 ip_buf = NULL; 488 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 489 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 490 "ether type 0x%x", eth_type, hdrp->value)); 491 492 if (eth_type < ETHERMTU) { 493 tmp = 1ull; 494 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 495 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 496 "value 0x%llx", hdrp->value)); 497 if (*(hdrs_buf + sizeof (struct ether_header)) 498 == LLC_SNAP_SAP) { 499 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 500 sizeof (struct ether_header) + 6))); 501 NXGE_DEBUG_MSG((NULL, TX_CTL, 502 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 503 eth_type)); 504 } else { 505 goto fill_tx_header_done; 506 } 507 } else if (eth_type == VLAN_ETHERTYPE) { 508 tmp = 1ull; 509 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 510 511 eth_type = ntohs(((struct ether_vlan_header *) 512 hdrs_buf)->ether_type); 513 is_vlan = B_TRUE; 514 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 515 "value 0x%llx", hdrp->value)); 516 } 517 518 if (!is_vlan) { 519 eth_hdr_size = sizeof (struct ether_header); 520 } else { 521 eth_hdr_size = sizeof (struct ether_vlan_header); 522 } 523 524 switch (eth_type) { 525 case ETHERTYPE_IP: 526 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 527 ip_buf = nmp->b_rptr + eth_hdr_size; 528 mblk_len -= eth_hdr_size; 529 iph_len = ((*ip_buf) & 0x0f); 530 if (mblk_len > (iph_len + sizeof (uint32_t))) { 531 ip_buf = nmp->b_rptr; 532 ip_buf += eth_hdr_size; 533 } else { 534 ip_buf = NULL; 535 } 536 537 } 538 if 
(ip_buf == NULL) { 539 hdrs_size = 0; 540 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 541 while ((nmp) && (hdrs_size < 542 sizeof (hdrs_buf))) { 543 mblk_len = (size_t)nmp->b_wptr - 544 (size_t)nmp->b_rptr; 545 if (mblk_len >= 546 (sizeof (hdrs_buf) - hdrs_size)) 547 mblk_len = sizeof (hdrs_buf) - 548 hdrs_size; 549 bcopy(nmp->b_rptr, 550 &hdrs_buf[hdrs_size], mblk_len); 551 hdrs_size += mblk_len; 552 nmp = nmp->b_cont; 553 } 554 ip_buf = hdrs_buf; 555 ip_buf += eth_hdr_size; 556 iph_len = ((*ip_buf) & 0x0f); 557 } 558 559 ipproto = ip_buf[9]; 560 561 tmp = (uint64_t)iph_len; 562 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 563 tmp = (uint64_t)(eth_hdr_size >> 1); 564 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 565 566 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 567 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 568 "tmp 0x%x", 569 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 570 ipproto, tmp)); 571 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 572 "value 0x%llx", hdrp->value)); 573 574 break; 575 576 case ETHERTYPE_IPV6: 577 hdrs_size = 0; 578 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 579 while ((nmp) && (hdrs_size < 580 sizeof (hdrs_buf))) { 581 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 582 if (mblk_len >= 583 (sizeof (hdrs_buf) - hdrs_size)) 584 mblk_len = sizeof (hdrs_buf) - 585 hdrs_size; 586 bcopy(nmp->b_rptr, 587 &hdrs_buf[hdrs_size], mblk_len); 588 hdrs_size += mblk_len; 589 nmp = nmp->b_cont; 590 } 591 ip_buf = hdrs_buf; 592 ip_buf += eth_hdr_size; 593 594 tmp = 1ull; 595 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 596 597 tmp = (eth_hdr_size >> 1); 598 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 599 600 /* byte 6 is the next header protocol */ 601 ipproto = ip_buf[6]; 602 603 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 604 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 605 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 606 ipproto)); 607 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 608 "value 0x%llx", hdrp->value)); 609 610 break; 611 612 default: 613 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 614 goto fill_tx_header_done; 615 } 616 617 switch (ipproto) { 618 case IPPROTO_TCP: 619 NXGE_DEBUG_MSG((NULL, TX_CTL, 620 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 621 if (l4_cksum) { 622 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 623 hdrp->value |= 624 (((uint64_t)(start_offset >> 1)) << 625 TX_PKT_HEADER_L4START_SHIFT); 626 hdrp->value |= 627 (((uint64_t)(stuff_offset >> 1)) << 628 TX_PKT_HEADER_L4STUFF_SHIFT); 629 630 NXGE_DEBUG_MSG((NULL, TX_CTL, 631 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 632 "value 0x%llx", hdrp->value)); 633 } 634 635 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 636 "value 0x%llx", hdrp->value)); 637 break; 638 639 case IPPROTO_UDP: 640 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 641 if (l4_cksum) { 642 if (!nxge_cksum_offload) { 643 uint16_t *up; 644 uint16_t cksum; 645 t_uscalar_t stuff_len; 646 647 /* 648 * The checksum field has the 649 * partial checksum. 650 * IP_CSUM() macro calls ip_cksum() which 651 * can add in the partial checksum. 
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use partial checksum "
				    "cksum 0x%x "
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP "
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
pkt_len += len; 792 if (pkt_len >= nxge_bcopy_thresh) { 793 pkt_len = 0; 794 len = 0; 795 nmp = bmp; 796 } 797 } else { 798 NXGE_DEBUG_MSG((NULL, TX_CTL, 799 "==> nxge_tx_pkt_nmblocks: " 800 "len %d (> thresh) pkt_len %d nmblks %d", 801 len, pkt_len, nmblks)); 802 pkt_len = 0; 803 nmblks++; 804 /* 805 * Hardware limits the transfer length to 4K. 806 * If len is more than 4K, we need to break 807 * it up to at most 2 more blocks. 808 */ 809 if (len > TX_MAX_TRANSFER_LENGTH) { 810 uint32_t nsegs; 811 812 nsegs = 1; 813 NXGE_DEBUG_MSG((NULL, TX_CTL, 814 "==> nxge_tx_pkt_nmblocks: " 815 "len %d pkt_len %d nmblks %d nsegs %d", 816 len, pkt_len, nmblks, nsegs)); 817 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 818 ++nsegs; 819 } 820 do { 821 b_wptr = nmp->b_rptr + 822 TX_MAX_TRANSFER_LENGTH; 823 nmp->b_wptr = b_wptr; 824 if ((tmp = dupb(nmp)) == NULL) { 825 return (0); 826 } 827 tmp->b_rptr = b_wptr; 828 tmp->b_wptr = nmp->b_wptr; 829 tmp->b_cont = nmp->b_cont; 830 nmp->b_cont = tmp; 831 nmblks++; 832 if (--nsegs) { 833 nmp = tmp; 834 } 835 } while (nsegs); 836 nmp = tmp; 837 } 838 } 839 840 /* 841 * Hardware limits the transmit gather pointers to 15. 842 */ 843 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 844 TX_MAX_GATHER_POINTERS) { 845 NXGE_DEBUG_MSG((NULL, TX_CTL, 846 "==> nxge_tx_pkt_nmblocks: pull msg - " 847 "len %d pkt_len %d nmblks %d", 848 len, pkt_len, nmblks)); 849 /* Pull all message blocks from b_cont */ 850 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 851 return (0); 852 } 853 freemsg(nmp->b_cont); 854 nmp->b_cont = tmp; 855 pkt_len = 0; 856 } 857 bmp = nmp; 858 nmp = nmp->b_cont; 859 } 860 861 NXGE_DEBUG_MSG((NULL, TX_CTL, 862 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 863 "nmblks %d len %d tot_xfer_len %d", 864 mp->b_rptr, mp->b_wptr, nmblks, 865 MBLKL(mp), *tot_xfer_len_p)); 866 867 return (nmblks); 868 } 869 870 boolean_t 871 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 872 { 873 boolean_t status = B_TRUE; 874 p_nxge_dma_common_t tx_desc_dma_p; 875 nxge_dma_common_t desc_area; 876 p_tx_desc_t tx_desc_ring_vp; 877 p_tx_desc_t tx_desc_p; 878 p_tx_desc_t tx_desc_pp; 879 tx_desc_t r_tx_desc; 880 p_tx_msg_t tx_msg_ring; 881 p_tx_msg_t tx_msg_p; 882 npi_handle_t handle; 883 tx_ring_hdl_t tx_head; 884 uint32_t pkt_len; 885 uint_t tx_rd_index; 886 uint16_t head_index, tail_index; 887 uint8_t tdc; 888 boolean_t head_wrap, tail_wrap; 889 p_nxge_tx_ring_stats_t tdc_stats; 890 int rc; 891 892 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 893 894 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 895 (nmblks != 0)); 896 NXGE_DEBUG_MSG((nxgep, TX_CTL, 897 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 898 tx_ring_p->descs_pending, nxge_reclaim_pending, 899 nmblks)); 900 if (!status) { 901 tx_desc_dma_p = &tx_ring_p->tdc_desc; 902 desc_area = tx_ring_p->tdc_desc; 903 handle = NXGE_DEV_NPI_HANDLE(nxgep); 904 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 905 tx_desc_ring_vp = 906 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 907 tx_rd_index = tx_ring_p->rd_index; 908 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 909 tx_msg_ring = tx_ring_p->tx_msg_ring; 910 tx_msg_p = &tx_msg_ring[tx_rd_index]; 911 tdc = tx_ring_p->tdc; 912 tdc_stats = tx_ring_p->tdc_stats; 913 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 914 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 915 } 916 917 tail_index = tx_ring_p->wr_index; 918 tail_wrap = tx_ring_p->wr_index_wrap; 919 920 NXGE_DEBUG_MSG((nxgep, TX_CTL, 921 "==> 
nxge_txdma_reclaim: tdc %d tx_rd_index %d " 922 "tail_index %d tail_wrap %d " 923 "tx_desc_p $%p ($%p) ", 924 tdc, tx_rd_index, tail_index, tail_wrap, 925 tx_desc_p, (*(uint64_t *)tx_desc_p))); 926 /* 927 * Read the hardware maintained transmit head 928 * and wrap around bit. 929 */ 930 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 931 head_index = tx_head.bits.ldw.head; 932 head_wrap = tx_head.bits.ldw.wrap; 933 NXGE_DEBUG_MSG((nxgep, TX_CTL, 934 "==> nxge_txdma_reclaim: " 935 "tx_rd_index %d tail %d tail_wrap %d " 936 "head %d wrap %d", 937 tx_rd_index, tail_index, tail_wrap, 938 head_index, head_wrap)); 939 940 if (head_index == tail_index) { 941 if (TXDMA_RING_EMPTY(head_index, head_wrap, 942 tail_index, tail_wrap) && 943 (head_index == tx_rd_index)) { 944 NXGE_DEBUG_MSG((nxgep, TX_CTL, 945 "==> nxge_txdma_reclaim: EMPTY")); 946 return (B_TRUE); 947 } 948 949 NXGE_DEBUG_MSG((nxgep, TX_CTL, 950 "==> nxge_txdma_reclaim: Checking " 951 "if ring full")); 952 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 953 tail_wrap)) { 954 NXGE_DEBUG_MSG((nxgep, TX_CTL, 955 "==> nxge_txdma_reclaim: full")); 956 return (B_FALSE); 957 } 958 } 959 960 NXGE_DEBUG_MSG((nxgep, TX_CTL, 961 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 962 963 tx_desc_pp = &r_tx_desc; 964 while ((tx_rd_index != head_index) && 965 (tx_ring_p->descs_pending != 0)) { 966 967 NXGE_DEBUG_MSG((nxgep, TX_CTL, 968 "==> nxge_txdma_reclaim: Checking if pending")); 969 970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 971 "==> nxge_txdma_reclaim: " 972 "descs_pending %d ", 973 tx_ring_p->descs_pending)); 974 975 NXGE_DEBUG_MSG((nxgep, TX_CTL, 976 "==> nxge_txdma_reclaim: " 977 "(tx_rd_index %d head_index %d " 978 "(tx_desc_p $%p)", 979 tx_rd_index, head_index, 980 tx_desc_p)); 981 982 tx_desc_pp->value = tx_desc_p->value; 983 NXGE_DEBUG_MSG((nxgep, TX_CTL, 984 "==> nxge_txdma_reclaim: " 985 "(tx_rd_index %d head_index %d " 986 "tx_desc_p $%p (desc value 0x%llx) ", 987 tx_rd_index, head_index, 988 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 989 990 NXGE_DEBUG_MSG((nxgep, TX_CTL, 991 "==> nxge_txdma_reclaim: dump desc:")); 992 993 pkt_len = tx_desc_pp->bits.hdw.tr_len; 994 tdc_stats->obytes += pkt_len; 995 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 996 NXGE_DEBUG_MSG((nxgep, TX_CTL, 997 "==> nxge_txdma_reclaim: pkt_len %d " 998 "tdc channel %d opackets %d", 999 pkt_len, 1000 tdc, 1001 tdc_stats->opackets)); 1002 1003 if (tx_msg_p->flags.dma_type == USE_DVMA) { 1004 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1005 "tx_desc_p = $%p " 1006 "tx_desc_pp = $%p " 1007 "index = %d", 1008 tx_desc_p, 1009 tx_desc_pp, 1010 tx_ring_p->rd_index)); 1011 (void) dvma_unload(tx_msg_p->dvma_handle, 1012 0, -1); 1013 tx_msg_p->dvma_handle = NULL; 1014 if (tx_ring_p->dvma_wr_index == 1015 tx_ring_p->dvma_wrap_mask) { 1016 tx_ring_p->dvma_wr_index = 0; 1017 } else { 1018 tx_ring_p->dvma_wr_index++; 1019 } 1020 tx_ring_p->dvma_pending--; 1021 } else if (tx_msg_p->flags.dma_type == 1022 USE_DMA) { 1023 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1024 "==> nxge_txdma_reclaim: " 1025 "USE DMA")); 1026 if (rc = ddi_dma_unbind_handle 1027 (tx_msg_p->dma_handle)) { 1028 cmn_err(CE_WARN, "!nxge_reclaim: " 1029 "ddi_dma_unbind_handle " 1030 "failed. status %d", rc); 1031 } 1032 } 1033 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1034 "==> nxge_txdma_reclaim: count packets")); 1035 /* 1036 * count a chained packet only once. 
1037 */ 1038 if (tx_msg_p->tx_message != NULL) { 1039 freemsg(tx_msg_p->tx_message); 1040 tx_msg_p->tx_message = NULL; 1041 } 1042 1043 tx_msg_p->flags.dma_type = USE_NONE; 1044 tx_rd_index = tx_ring_p->rd_index; 1045 tx_rd_index = (tx_rd_index + 1) & 1046 tx_ring_p->tx_wrap_mask; 1047 tx_ring_p->rd_index = tx_rd_index; 1048 tx_ring_p->descs_pending--; 1049 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 1050 tx_msg_p = &tx_msg_ring[tx_rd_index]; 1051 } 1052 1053 status = (nmblks <= (tx_ring_p->tx_ring_size - 1054 tx_ring_p->descs_pending - 1055 TX_FULL_MARK)); 1056 if (status) { 1057 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 1058 } 1059 } else { 1060 status = (nmblks <= 1061 (tx_ring_p->tx_ring_size - 1062 tx_ring_p->descs_pending - 1063 TX_FULL_MARK)); 1064 } 1065 1066 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1067 "<== nxge_txdma_reclaim status = 0x%08x", status)); 1068 1069 return (status); 1070 } 1071 1072 /* 1073 * nxge_tx_intr 1074 * 1075 * Process a TDC interrupt 1076 * 1077 * Arguments: 1078 * arg1 A Logical Device state Vector (LSV) data structure. 1079 * arg2 nxge_t * 1080 * 1081 * Notes: 1082 * 1083 * NPI/NXGE function calls: 1084 * npi_txdma_control_status() 1085 * npi_intr_ldg_mgmt_set() 1086 * 1087 * nxge_tx_err_evnts() 1088 * nxge_txdma_reclaim() 1089 * 1090 * Registers accessed: 1091 * TX_CS DMC+0x40028 Transmit Control And Status 1092 * PIO_LDSV 1093 * 1094 * Context: 1095 * Any domain 1096 */ 1097 uint_t 1098 nxge_tx_intr(void *arg1, void *arg2) 1099 { 1100 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1101 p_nxge_t nxgep = (p_nxge_t)arg2; 1102 p_nxge_ldg_t ldgp; 1103 uint8_t channel; 1104 uint32_t vindex; 1105 npi_handle_t handle; 1106 tx_cs_t cs; 1107 p_tx_ring_t *tx_rings; 1108 p_tx_ring_t tx_ring_p; 1109 npi_status_t rs = NPI_SUCCESS; 1110 uint_t serviced = DDI_INTR_UNCLAIMED; 1111 nxge_status_t status = NXGE_OK; 1112 1113 if (ldvp == NULL) { 1114 NXGE_DEBUG_MSG((NULL, INT_CTL, 1115 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 1116 nxgep, ldvp)); 1117 return (DDI_INTR_UNCLAIMED); 1118 } 1119 1120 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1121 nxgep = ldvp->nxgep; 1122 } 1123 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1124 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 1125 nxgep, ldvp)); 1126 1127 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1128 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1129 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1130 "<== nxge_tx_intr: interface not started or intialized")); 1131 return (DDI_INTR_CLAIMED); 1132 } 1133 1134 /* 1135 * This interrupt handler is for a specific 1136 * transmit dma channel. 1137 */ 1138 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1139 /* Get the control and status for this channel. 
*/ 1140 channel = ldvp->channel; 1141 ldgp = ldvp->ldgp; 1142 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1143 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 1144 "channel %d", 1145 nxgep, ldvp, channel)); 1146 1147 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 1148 vindex = ldvp->vdma_index; 1149 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1150 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 1151 channel, vindex, rs)); 1152 if (!rs && cs.bits.ldw.mk) { 1153 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1154 "==> nxge_tx_intr:channel %d ring index %d " 1155 "status 0x%08x (mk bit set)", 1156 channel, vindex, rs)); 1157 tx_rings = nxgep->tx_rings->rings; 1158 tx_ring_p = tx_rings[vindex]; 1159 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1160 "==> nxge_tx_intr:channel %d ring index %d " 1161 "status 0x%08x (mk bit set, calling reclaim)", 1162 channel, vindex, rs)); 1163 1164 MUTEX_ENTER(&tx_ring_p->lock); 1165 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 1166 MUTEX_EXIT(&tx_ring_p->lock); 1167 mac_tx_update(nxgep->mach); 1168 } 1169 1170 /* 1171 * Process other transmit control and status. 1172 * Check the ldv state. 1173 */ 1174 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1175 /* 1176 * Rearm this logical group if this is a single device 1177 * group. 1178 */ 1179 if (ldgp->nldvs == 1) { 1180 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1181 "==> nxge_tx_intr: rearm")); 1182 if (status == NXGE_OK) { 1183 if (isLDOMguest(nxgep)) { 1184 nxge_hio_ldgimgn(nxgep, ldgp); 1185 } else { 1186 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 1187 B_TRUE, ldgp->ldg_timer); 1188 } 1189 } 1190 } 1191 1192 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 1193 serviced = DDI_INTR_CLAIMED; 1194 return (serviced); 1195 } 1196 1197 void 1198 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 1199 { 1200 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 1201 1202 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1203 1204 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 1205 } 1206 1207 void 1208 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 1209 { 1210 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 1211 1212 (void) nxge_txdma_stop(nxgep); 1213 1214 (void) nxge_fixup_txdma_rings(nxgep); 1215 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1216 (void) nxge_tx_mac_enable(nxgep); 1217 (void) nxge_txdma_hw_kick(nxgep); 1218 1219 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 1220 } 1221 1222 npi_status_t 1223 nxge_txdma_channel_disable( 1224 nxge_t *nxge, 1225 int channel) 1226 { 1227 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1228 npi_status_t rs; 1229 tdmc_intr_dbg_t intr_dbg; 1230 1231 /* 1232 * Stop the dma channel and wait for the stop-done. 1233 * If the stop-done bit is not present, then force 1234 * an error so TXC will stop. 1235 * All channels bound to this port need to be stopped 1236 * and reset after injecting an interrupt error. 
1237 */ 1238 rs = npi_txdma_channel_disable(handle, channel); 1239 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1240 "==> nxge_txdma_channel_disable(%d) " 1241 "rs 0x%x", channel, rs)); 1242 if (rs != NPI_SUCCESS) { 1243 /* Inject any error */ 1244 intr_dbg.value = 0; 1245 intr_dbg.bits.ldw.nack_pref = 1; 1246 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1247 "==> nxge_txdma_hw_mode: " 1248 "channel %d (stop failed 0x%x) " 1249 "(inject err)", rs, channel)); 1250 (void) npi_txdma_inj_int_error_set( 1251 handle, channel, &intr_dbg); 1252 rs = npi_txdma_channel_disable(handle, channel); 1253 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1254 "==> nxge_txdma_hw_mode: " 1255 "channel %d (stop again 0x%x) " 1256 "(after inject err)", 1257 rs, channel)); 1258 } 1259 1260 return (rs); 1261 } 1262 1263 /* 1264 * nxge_txdma_hw_mode 1265 * 1266 * Toggle all TDCs on (enable) or off (disable). 1267 * 1268 * Arguments: 1269 * nxgep 1270 * enable Enable or disable a TDC. 1271 * 1272 * Notes: 1273 * 1274 * NPI/NXGE function calls: 1275 * npi_txdma_channel_enable(TX_CS) 1276 * npi_txdma_channel_disable(TX_CS) 1277 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1278 * 1279 * Registers accessed: 1280 * TX_CS DMC+0x40028 Transmit Control And Status 1281 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1282 * 1283 * Context: 1284 * Any domain 1285 */ 1286 nxge_status_t 1287 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1288 { 1289 nxge_grp_set_t *set = &nxgep->tx_set; 1290 1291 npi_handle_t handle; 1292 nxge_status_t status; 1293 npi_status_t rs; 1294 int tdc; 1295 1296 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1297 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1298 1299 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1300 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1301 "<== nxge_txdma_mode: not initialized")); 1302 return (NXGE_ERROR); 1303 } 1304 1305 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1306 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1307 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1308 return (NXGE_ERROR); 1309 } 1310 1311 /* Enable or disable all of the TDCs owned by us. */ 1312 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1313 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1314 if ((1 << tdc) & set->owned.map) { 1315 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1316 if (ring) { 1317 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1318 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1319 if (enable) { 1320 rs = npi_txdma_channel_enable 1321 (handle, tdc); 1322 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1323 "==> nxge_txdma_hw_mode: " 1324 "channel %d (enable) rs 0x%x", 1325 tdc, rs)); 1326 } else { 1327 rs = nxge_txdma_channel_disable 1328 (nxgep, tdc); 1329 } 1330 } 1331 } 1332 } 1333 1334 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1335 1336 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1337 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1338 1339 return (status); 1340 } 1341 1342 void 1343 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1344 { 1345 npi_handle_t handle; 1346 1347 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1348 "==> nxge_txdma_enable_channel: channel %d", channel)); 1349 1350 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1351 /* enable the transmit dma channels */ 1352 (void) npi_txdma_channel_enable(handle, channel); 1353 1354 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1355 } 1356 1357 void 1358 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1359 { 1360 npi_handle_t handle; 1361 1362 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1363 "==> nxge_txdma_disable_channel: channel %d", channel)); 1364 1365 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1366 /* stop the transmit dma channels */ 1367 (void) npi_txdma_channel_disable(handle, channel); 1368 1369 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1370 } 1371 1372 /* 1373 * nxge_txdma_stop_inj_err 1374 * 1375 * Stop a TDC. If at first we don't succeed, inject an error. 1376 * 1377 * Arguments: 1378 * nxgep 1379 * channel The channel to stop. 1380 * 1381 * Notes: 1382 * 1383 * NPI/NXGE function calls: 1384 * npi_txdma_channel_disable() 1385 * npi_txdma_inj_int_error_set() 1386 * #if defined(NXGE_DEBUG) 1387 * nxge_txdma_regs_dump_channels(nxgep); 1388 * #endif 1389 * 1390 * Registers accessed: 1391 * TX_CS DMC+0x40028 Transmit Control And Status 1392 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1393 * 1394 * Context: 1395 * Any domain 1396 */ 1397 int 1398 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1399 { 1400 npi_handle_t handle; 1401 tdmc_intr_dbg_t intr_dbg; 1402 int status; 1403 npi_status_t rs = NPI_SUCCESS; 1404 1405 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1406 /* 1407 * Stop the dma channel waits for the stop done. 1408 * If the stop done bit is not set, then create 1409 * an error. 1410 */ 1411 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1412 rs = npi_txdma_channel_disable(handle, channel); 1413 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1414 if (status == NXGE_OK) { 1415 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1416 "<== nxge_txdma_stop_inj_err (channel %d): " 1417 "stopped OK", channel)); 1418 return (status); 1419 } 1420 1421 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1422 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1423 "injecting error", channel, rs)); 1424 /* Inject any error */ 1425 intr_dbg.value = 0; 1426 intr_dbg.bits.ldw.nack_pref = 1; 1427 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1428 1429 /* Stop done bit will be set as a result of error injection */ 1430 rs = npi_txdma_channel_disable(handle, channel); 1431 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1432 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1433 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1434 "<== nxge_txdma_stop_inj_err (channel %d): " 1435 "stopped OK ", channel)); 1436 return (status); 1437 } 1438 1439 #if defined(NXGE_DEBUG) 1440 nxge_txdma_regs_dump_channels(nxgep); 1441 #endif 1442 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1443 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1444 " (injected error but still not stopped)", channel, rs)); 1445 1446 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1447 return (status); 1448 } 1449 1450 /*ARGSUSED*/ 1451 void 1452 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1453 { 1454 nxge_grp_set_t *set = &nxgep->tx_set; 1455 int tdc; 1456 1457 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1458 1459 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1460 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1461 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 1462 return; 1463 } 1464 1465 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1466 if ((1 << tdc) & set->owned.map) { 1467 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1468 if (ring) { 1469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1470 "==> nxge_fixup_txdma_rings: channel %d", 1471 tdc)); 1472 nxge_txdma_fixup_channel(nxgep, ring, tdc); 1473 } 1474 } 1475 } 1476 1477 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1478 } 1479 1480 /*ARGSUSED*/ 1481 void 1482 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1483 { 1484 p_tx_ring_t ring_p; 1485 1486 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1487 ring_p = nxge_txdma_get_ring(nxgep, channel); 1488 if (ring_p == NULL) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1490 return; 1491 } 1492 1493 if (ring_p->tdc != channel) { 1494 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1495 "<== nxge_txdma_fix_channel: channel not matched " 1496 "ring tdc %d passed channel", 1497 ring_p->tdc, channel)); 1498 return; 1499 } 1500 1501 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1502 1503 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1504 } 1505 1506 /*ARGSUSED*/ 1507 void 1508 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1509 { 1510 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1511 1512 if (ring_p == NULL) { 1513 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1514 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1515 return; 1516 } 1517 1518 if (ring_p->tdc != channel) { 1519 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1520 "<== nxge_txdma_fixup_channel: channel not matched " 1521 "ring tdc %d passed channel", 1522 ring_p->tdc, channel)); 1523 return; 1524 } 1525 1526 MUTEX_ENTER(&ring_p->lock); 1527 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1528 ring_p->rd_index = 0; 1529 ring_p->wr_index = 0; 1530 ring_p->ring_head.value = 0; 1531 ring_p->ring_kick_tail.value = 0; 1532 ring_p->descs_pending = 0; 1533 MUTEX_EXIT(&ring_p->lock); 1534 1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1536 } 1537 1538 /*ARGSUSED*/ 1539 void 1540 nxge_txdma_hw_kick(p_nxge_t nxgep) 1541 { 1542 nxge_grp_set_t *set = &nxgep->tx_set; 1543 int tdc; 1544 1545 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1546 1547 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1548 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1549 "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 1550 return; 1551 } 1552 1553 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1554 if ((1 << tdc) & set->owned.map) { 1555 tx_ring_t *ring = 
nxgep->tx_rings->rings[tdc]; 1556 if (ring) { 1557 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1558 "==> nxge_txdma_hw_kick: channel %d", tdc)); 1559 nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1560 } 1561 } 1562 } 1563 1564 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1565 } 1566 1567 /*ARGSUSED*/ 1568 void 1569 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1570 { 1571 p_tx_ring_t ring_p; 1572 1573 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1574 1575 ring_p = nxge_txdma_get_ring(nxgep, channel); 1576 if (ring_p == NULL) { 1577 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1578 " nxge_txdma_kick_channel")); 1579 return; 1580 } 1581 1582 if (ring_p->tdc != channel) { 1583 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1584 "<== nxge_txdma_kick_channel: channel not matched " 1585 "ring tdc %d passed channel", 1586 ring_p->tdc, channel)); 1587 return; 1588 } 1589 1590 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1591 1592 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1593 } 1594 1595 /*ARGSUSED*/ 1596 void 1597 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1598 { 1599 1600 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1601 1602 if (ring_p == NULL) { 1603 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1604 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1605 return; 1606 } 1607 1608 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1609 } 1610 1611 /* 1612 * nxge_check_tx_hang 1613 * 1614 * Check the state of all TDCs belonging to nxgep. 1615 * 1616 * Arguments: 1617 * nxgep 1618 * 1619 * Notes: 1620 * Called by nxge_hw.c:nxge_check_hw_state(). 1621 * 1622 * NPI/NXGE function calls: 1623 * 1624 * Registers accessed: 1625 * 1626 * Context: 1627 * Any domain 1628 */ 1629 /*ARGSUSED*/ 1630 void 1631 nxge_check_tx_hang(p_nxge_t nxgep) 1632 { 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1634 1635 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1636 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1637 goto nxge_check_tx_hang_exit; 1638 } 1639 1640 /* 1641 * Needs inputs from hardware for regs: 1642 * head index had not moved since last timeout. 1643 * packets not transmitted or stuffed registers. 1644 */ 1645 if (nxge_txdma_hung(nxgep)) { 1646 nxge_fixup_hung_txdma_rings(nxgep); 1647 } 1648 1649 nxge_check_tx_hang_exit: 1650 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1651 } 1652 1653 /* 1654 * nxge_txdma_hung 1655 * 1656 * Reset a TDC. 1657 * 1658 * Arguments: 1659 * nxgep 1660 * channel The channel to reset. 1661 * reg_data The current TX_CS. 
1662 * 1663 * Notes: 1664 * Called by nxge_check_tx_hang() 1665 * 1666 * NPI/NXGE function calls: 1667 * nxge_txdma_channel_hung() 1668 * 1669 * Registers accessed: 1670 * 1671 * Context: 1672 * Any domain 1673 */ 1674 int 1675 nxge_txdma_hung(p_nxge_t nxgep) 1676 { 1677 nxge_grp_set_t *set = &nxgep->tx_set; 1678 int tdc; 1679 1680 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1681 1682 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1683 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1684 "<== nxge_txdma_hung: NULL ring pointer(s)")); 1685 return (B_FALSE); 1686 } 1687 1688 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1689 if ((1 << tdc) & set->owned.map) { 1690 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1691 if (ring) { 1692 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 1693 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1694 "==> nxge_txdma_hung: TDC %d hung", 1695 tdc)); 1696 return (B_TRUE); 1697 } 1698 } 1699 } 1700 } 1701 1702 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1703 1704 return (B_FALSE); 1705 } 1706 1707 /* 1708 * nxge_txdma_channel_hung 1709 * 1710 * Reset a TDC. 1711 * 1712 * Arguments: 1713 * nxgep 1714 * ring <channel>'s ring. 1715 * channel The channel to reset. 1716 * 1717 * Notes: 1718 * Called by nxge_txdma.c:nxge_txdma_hung() 1719 * 1720 * NPI/NXGE function calls: 1721 * npi_txdma_ring_head_get() 1722 * 1723 * Registers accessed: 1724 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1725 * 1726 * Context: 1727 * Any domain 1728 */ 1729 int 1730 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1731 { 1732 uint16_t head_index, tail_index; 1733 boolean_t head_wrap, tail_wrap; 1734 npi_handle_t handle; 1735 tx_ring_hdl_t tx_head; 1736 uint_t tx_rd_index; 1737 1738 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1739 1740 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1741 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1742 "==> nxge_txdma_channel_hung: channel %d", channel)); 1743 MUTEX_ENTER(&tx_ring_p->lock); 1744 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1745 1746 tail_index = tx_ring_p->wr_index; 1747 tail_wrap = tx_ring_p->wr_index_wrap; 1748 tx_rd_index = tx_ring_p->rd_index; 1749 MUTEX_EXIT(&tx_ring_p->lock); 1750 1751 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1752 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1753 "tail_index %d tail_wrap %d ", 1754 channel, tx_rd_index, tail_index, tail_wrap)); 1755 /* 1756 * Read the hardware maintained transmit head 1757 * and wrap around bit. 
1758 */ 1759 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1760 head_index = tx_head.bits.ldw.head; 1761 head_wrap = tx_head.bits.ldw.wrap; 1762 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1763 "==> nxge_txdma_channel_hung: " 1764 "tx_rd_index %d tail %d tail_wrap %d " 1765 "head %d wrap %d", 1766 tx_rd_index, tail_index, tail_wrap, 1767 head_index, head_wrap)); 1768 1769 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1770 tail_index, tail_wrap) && 1771 (head_index == tx_rd_index)) { 1772 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1773 "==> nxge_txdma_channel_hung: EMPTY")); 1774 return (B_FALSE); 1775 } 1776 1777 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1778 "==> nxge_txdma_channel_hung: Checking if ring full")); 1779 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1780 tail_wrap)) { 1781 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1782 "==> nxge_txdma_channel_hung: full")); 1783 return (B_TRUE); 1784 } 1785 1786 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1787 1788 return (B_FALSE); 1789 } 1790 1791 /* 1792 * nxge_fixup_hung_txdma_rings 1793 * 1794 * Disable a TDC. 1795 * 1796 * Arguments: 1797 * nxgep 1798 * channel The channel to reset. 1799 * reg_data The current TX_CS. 1800 * 1801 * Notes: 1802 * Called by nxge_check_tx_hang() 1803 * 1804 * NPI/NXGE function calls: 1805 * npi_txdma_ring_head_get() 1806 * 1807 * Registers accessed: 1808 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1809 * 1810 * Context: 1811 * Any domain 1812 */ 1813 /*ARGSUSED*/ 1814 void 1815 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1816 { 1817 nxge_grp_set_t *set = &nxgep->tx_set; 1818 int tdc; 1819 1820 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1821 1822 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1823 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1824 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1825 return; 1826 } 1827 1828 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1829 if ((1 << tdc) & set->owned.map) { 1830 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1831 if (ring) { 1832 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1833 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1834 "==> nxge_fixup_hung_txdma_rings: TDC %d", 1835 tdc)); 1836 } 1837 } 1838 } 1839 1840 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1841 } 1842 1843 /* 1844 * nxge_txdma_fixup_hung_channel 1845 * 1846 * 'Fix' a hung TDC. 1847 * 1848 * Arguments: 1849 * nxgep 1850 * channel The channel to fix. 1851 * 1852 * Notes: 1853 * Called by nxge_fixup_hung_txdma_rings() 1854 * 1855 * 1. Reclaim the TDC. 1856 * 2. Disable the TDC. 
1857 * 1858 * NPI/NXGE function calls: 1859 * nxge_txdma_reclaim() 1860 * npi_txdma_channel_disable(TX_CS) 1861 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1862 * 1863 * Registers accessed: 1864 * TX_CS DMC+0x40028 Transmit Control And Status 1865 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1866 * 1867 * Context: 1868 * Any domain 1869 */ 1870 /*ARGSUSED*/ 1871 void 1872 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1873 { 1874 p_tx_ring_t ring_p; 1875 1876 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1877 ring_p = nxge_txdma_get_ring(nxgep, channel); 1878 if (ring_p == NULL) { 1879 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1880 "<== nxge_txdma_fix_hung_channel")); 1881 return; 1882 } 1883 1884 if (ring_p->tdc != channel) { 1885 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1886 "<== nxge_txdma_fix_hung_channel: channel not matched " 1887 "ring tdc %d passed channel", 1888 ring_p->tdc, channel)); 1889 return; 1890 } 1891 1892 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1893 1894 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1895 } 1896 1897 /*ARGSUSED*/ 1898 void 1899 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1900 uint16_t channel) 1901 { 1902 npi_handle_t handle; 1903 tdmc_intr_dbg_t intr_dbg; 1904 int status = NXGE_OK; 1905 1906 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1907 1908 if (ring_p == NULL) { 1909 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1910 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1911 return; 1912 } 1913 1914 if (ring_p->tdc != channel) { 1915 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1916 "<== nxge_txdma_fixup_hung_channel: channel " 1917 "not matched " 1918 "ring tdc %d passed channel", 1919 ring_p->tdc, channel)); 1920 return; 1921 } 1922 1923 /* Reclaim descriptors */ 1924 MUTEX_ENTER(&ring_p->lock); 1925 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1926 MUTEX_EXIT(&ring_p->lock); 1927 1928 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1929 /* 1930 * Stop the dma channel waits for the stop done. 1931 * If the stop done bit is not set, then force 1932 * an error. 1933 */ 1934 status = npi_txdma_channel_disable(handle, channel); 1935 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1936 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1937 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1938 "ring tdc %d passed channel %d", 1939 ring_p->tdc, channel)); 1940 return; 1941 } 1942 1943 /* Inject any error */ 1944 intr_dbg.value = 0; 1945 intr_dbg.bits.ldw.nack_pref = 1; 1946 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1947 1948 /* Stop done bit will be set as a result of error injection */ 1949 status = npi_txdma_channel_disable(handle, channel); 1950 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1951 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1952 "<== nxge_txdma_fixup_hung_channel: stopped again" 1953 "ring tdc %d passed channel", 1954 ring_p->tdc, channel)); 1955 return; 1956 } 1957 1958 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1959 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 1960 "ring tdc %d passed channel", 1961 ring_p->tdc, channel)); 1962 1963 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1964 } 1965 1966 /*ARGSUSED*/ 1967 void 1968 nxge_reclaim_rings(p_nxge_t nxgep) 1969 { 1970 nxge_grp_set_t *set = &nxgep->tx_set; 1971 int tdc; 1972 1973 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 1974 1975 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1976 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1977 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1978 return; 1979 } 1980 1981 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1982 if ((1 << tdc) & set->owned.map) { 1983 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1984 if (ring) { 1985 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1986 "==> nxge_reclaim_rings: TDC %d", tdc)); 1987 MUTEX_ENTER(&ring->lock); 1988 (void) nxge_txdma_reclaim(nxgep, ring, tdc); 1989 MUTEX_EXIT(&ring->lock); 1990 } 1991 } 1992 } 1993 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 1995 } 1996 1997 void 1998 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 1999 { 2000 nxge_grp_set_t *set = &nxgep->tx_set; 2001 npi_handle_t handle; 2002 int tdc; 2003 2004 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 2005 2006 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2007 2008 if (!isLDOMguest(nxgep)) { 2009 (void) npi_txdma_dump_fzc_regs(handle); 2010 2011 /* Dump TXC registers. */ 2012 (void) npi_txc_dump_fzc_regs(handle); 2013 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2014 } 2015 2016 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2017 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2018 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2019 return; 2020 } 2021 2022 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2023 if ((1 << tdc) & set->owned.map) { 2024 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2025 if (ring) { 2026 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2027 "==> nxge_txdma_regs_dump_channels: " 2028 "TDC %d", tdc)); 2029 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2030 2031 /* Dump TXC registers, if able to. 
*/
				if (!isLDOMguest(nxgep)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_regs_dump_channels:"
					    " FZC TDC %d", tdc));
					(void) npi_txc_dump_tdc_fzc_regs
					    (handle, tdc);
				}
				nxge_txdma_regs_dump(nxgep, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
}

void
nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tx_ring_hdl_t hdl;
	tx_ring_kick_t kick;
	tx_cs_t cs;
	txc_control_t control;
	uint32_t bitmap = 0;
	uint32_t burst = 0;
	uint32_t bytes = 0;
	dma_log_page_t cfg;

	printf("\n\tfunc # %d tdc %d ",
	    nxgep->function_num, channel);
	cfg.page_num = 0;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 0 %d",
	    cfg.func_num, cfg.valid);
	cfg.page_num = 1;
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 1 %d",
	    cfg.func_num, cfg.valid);

	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
	printf("\n\thead value is 0x%0llx",
	    (long long)hdl.value);
	printf("\n\thead index %d", hdl.bits.ldw.head);
	printf("\n\tkick value is 0x%0llx",
	    (long long)kick.value);
	printf("\n\ttail index %d\n", kick.bits.ldw.tail);

	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);

	(void) npi_txc_control(handle, OP_GET, &control);
	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);

	printf("\n\tTXC port control 0x%0llx",
	    (long long)control.value);
	printf("\n\tTXC port bitmap 0x%x", bitmap);
	printf("\n\tTXC max burst %d", burst);
	printf("\n\tTXC bytes xmt %d\n", bytes);

	{
		ipp_status_t status;

		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
#if defined(__i386)
		printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
#else
		printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
#endif
	}
}

/*
 * nxge_tdc_hvio_setup
 *
 *	Record the hypervisor I/O (HVIO) base addresses and sizes of this
 *	channel's transmit data and control buffers, for use later when the
 *	NIU logical pages are configured.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Service domain (not called in a guest domain).
2124 */ 2125 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2126 static void 2127 nxge_tdc_hvio_setup( 2128 nxge_t *nxgep, int channel) 2129 { 2130 nxge_dma_common_t *data; 2131 nxge_dma_common_t *control; 2132 tx_ring_t *ring; 2133 2134 ring = nxgep->tx_rings->rings[channel]; 2135 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2136 2137 ring->hv_set = B_FALSE; 2138 2139 ring->hv_tx_buf_base_ioaddr_pp = 2140 (uint64_t)data->orig_ioaddr_pp; 2141 ring->hv_tx_buf_ioaddr_size = 2142 (uint64_t)data->orig_alength; 2143 2144 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2145 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2146 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2147 ring->hv_tx_buf_base_ioaddr_pp, 2148 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2149 data->ioaddr_pp, data->orig_vatopa, 2150 data->orig_alength, data->orig_alength)); 2151 2152 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2153 2154 ring->hv_tx_cntl_base_ioaddr_pp = 2155 (uint64_t)control->orig_ioaddr_pp; 2156 ring->hv_tx_cntl_ioaddr_size = 2157 (uint64_t)control->orig_alength; 2158 2159 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2160 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2161 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2162 ring->hv_tx_cntl_base_ioaddr_pp, 2163 control->orig_ioaddr_pp, control->orig_vatopa, 2164 ring->hv_tx_cntl_ioaddr_size, 2165 control->orig_alength, control->orig_alength)); 2166 } 2167 #endif 2168 2169 static nxge_status_t 2170 nxge_map_txdma(p_nxge_t nxgep, int channel) 2171 { 2172 nxge_dma_common_t **pData; 2173 nxge_dma_common_t **pControl; 2174 tx_ring_t **pRing, *ring; 2175 tx_mbox_t **mailbox; 2176 uint32_t num_chunks; 2177 2178 nxge_status_t status = NXGE_OK; 2179 2180 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2181 2182 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2183 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2185 "<== nxge_map_txdma: buf not allocated")); 2186 return (NXGE_ERROR); 2187 } 2188 } 2189 2190 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2191 return (NXGE_ERROR); 2192 2193 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2194 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2195 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2196 pRing = &nxgep->tx_rings->rings[channel]; 2197 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2198 2199 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2200 "tx_rings $%p tx_desc_rings $%p", 2201 nxgep->tx_rings, nxgep->tx_rings->rings)); 2202 2203 /* 2204 * Map descriptors from the buffer pools for <channel>. 2205 */ 2206 2207 /* 2208 * Set up and prepare buffer blocks, descriptors 2209 * and mailbox. 
2210 */ 2211 status = nxge_map_txdma_channel(nxgep, channel, 2212 pData, pRing, num_chunks, pControl, mailbox); 2213 if (status != NXGE_OK) { 2214 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2215 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2216 "returned 0x%x", 2217 nxgep, channel, status)); 2218 return (status); 2219 } 2220 2221 ring = *pRing; 2222 2223 ring->index = (uint16_t)channel; 2224 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2225 2226 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2227 if (isLDOMguest(nxgep)) { 2228 (void) nxge_tdc_lp_conf(nxgep, channel); 2229 } else { 2230 nxge_tdc_hvio_setup(nxgep, channel); 2231 } 2232 #endif 2233 2234 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2235 "(status 0x%x channel %d)", status, channel)); 2236 2237 return (status); 2238 } 2239 2240 static nxge_status_t 2241 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2242 p_nxge_dma_common_t *dma_buf_p, 2243 p_tx_ring_t *tx_desc_p, 2244 uint32_t num_chunks, 2245 p_nxge_dma_common_t *dma_cntl_p, 2246 p_tx_mbox_t *tx_mbox_p) 2247 { 2248 int status = NXGE_OK; 2249 2250 /* 2251 * Set up and prepare buffer blocks, descriptors 2252 * and mailbox. 2253 */ 2254 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2255 "==> nxge_map_txdma_channel (channel %d)", channel)); 2256 /* 2257 * Transmit buffer blocks 2258 */ 2259 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2260 dma_buf_p, tx_desc_p, num_chunks); 2261 if (status != NXGE_OK) { 2262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2263 "==> nxge_map_txdma_channel (channel %d): " 2264 "map buffer failed 0x%x", channel, status)); 2265 goto nxge_map_txdma_channel_exit; 2266 } 2267 2268 /* 2269 * Transmit block ring, and mailbox. 2270 */ 2271 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2272 tx_mbox_p); 2273 2274 goto nxge_map_txdma_channel_exit; 2275 2276 nxge_map_txdma_channel_fail1: 2277 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2278 "==> nxge_map_txdma_channel: unmap buf" 2279 "(status 0x%x channel %d)", 2280 status, channel)); 2281 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2282 2283 nxge_map_txdma_channel_exit: 2284 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2285 "<== nxge_map_txdma_channel: " 2286 "(status 0x%x channel %d)", 2287 status, channel)); 2288 2289 return (status); 2290 } 2291 2292 /*ARGSUSED*/ 2293 static void 2294 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2295 { 2296 tx_ring_t *ring; 2297 tx_mbox_t *mailbox; 2298 2299 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2300 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2301 /* 2302 * unmap tx block ring, and mailbox. 2303 */ 2304 ring = nxgep->tx_rings->rings[channel]; 2305 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2306 2307 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2308 2309 /* unmap buffer blocks */ 2310 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2311 2312 nxge_free_txb(nxgep, channel); 2313 2314 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2315 } 2316 2317 /* 2318 * nxge_map_txdma_channel_cfg_ring 2319 * 2320 * Map a TDC into our kernel space. 2321 * This function allocates all of the per-channel data structures. 2322 * 2323 * Arguments: 2324 * nxgep 2325 * dma_channel The channel to map. 2326 * dma_cntl_p 2327 * tx_ring_p dma_channel's transmit ring 2328 * tx_mbox_p dma_channel's mailbox 2329 * 2330 * Notes: 2331 * 2332 * NPI/NXGE function calls: 2333 * nxge_setup_dma_common() 2334 * 2335 * Registers accessed: 2336 * none. 
2337 * 2338 * Context: 2339 * Any domain 2340 */ 2341 /*ARGSUSED*/ 2342 static void 2343 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2344 p_nxge_dma_common_t *dma_cntl_p, 2345 p_tx_ring_t tx_ring_p, 2346 p_tx_mbox_t *tx_mbox_p) 2347 { 2348 p_tx_mbox_t mboxp; 2349 p_nxge_dma_common_t cntl_dmap; 2350 p_nxge_dma_common_t dmap; 2351 p_tx_rng_cfig_t tx_ring_cfig_p; 2352 p_tx_ring_kick_t tx_ring_kick_p; 2353 p_tx_cs_t tx_cs_p; 2354 p_tx_dma_ent_msk_t tx_evmask_p; 2355 p_txdma_mbh_t mboxh_p; 2356 p_txdma_mbl_t mboxl_p; 2357 uint64_t tx_desc_len; 2358 2359 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2360 "==> nxge_map_txdma_channel_cfg_ring")); 2361 2362 cntl_dmap = *dma_cntl_p; 2363 2364 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2365 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2366 sizeof (tx_desc_t)); 2367 /* 2368 * Zero out transmit ring descriptors. 2369 */ 2370 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2371 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2372 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2373 tx_cs_p = &(tx_ring_p->tx_cs); 2374 tx_evmask_p = &(tx_ring_p->tx_evmask); 2375 tx_ring_cfig_p->value = 0; 2376 tx_ring_kick_p->value = 0; 2377 tx_cs_p->value = 0; 2378 tx_evmask_p->value = 0; 2379 2380 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2381 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2382 dma_channel, 2383 dmap->dma_cookie.dmac_laddress)); 2384 2385 tx_ring_cfig_p->value = 0; 2386 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2387 tx_ring_cfig_p->value = 2388 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2389 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2390 2391 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2392 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2393 dma_channel, 2394 tx_ring_cfig_p->value)); 2395 2396 tx_cs_p->bits.ldw.rst = 1; 2397 2398 /* Map in mailbox */ 2399 mboxp = (p_tx_mbox_t) 2400 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2401 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2402 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2403 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2404 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2405 mboxh_p->value = mboxl_p->value = 0; 2406 2407 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2408 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2409 dmap->dma_cookie.dmac_laddress)); 2410 2411 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2412 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2413 2414 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2415 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2416 2417 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2418 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2419 dmap->dma_cookie.dmac_laddress)); 2420 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2421 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2422 "mbox $%p", 2423 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2424 tx_ring_p->page_valid.value = 0; 2425 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2426 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2427 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2428 tx_ring_p->page_hdl.value = 0; 2429 2430 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2431 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2432 2433 tx_ring_p->max_burst.value = 0; 2434 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2435 2436 *tx_mbox_p = mboxp; 2437 2438 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2439 "<== nxge_map_txdma_channel_cfg_ring")); 2440 } 2441 2442 
/*ARGSUSED*/ 2443 static void 2444 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2445 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2446 { 2447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2448 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2449 tx_ring_p->tdc)); 2450 2451 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2452 2453 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2454 "<== nxge_unmap_txdma_channel_cfg_ring")); 2455 } 2456 2457 /* 2458 * nxge_map_txdma_channel_buf_ring 2459 * 2460 * 2461 * Arguments: 2462 * nxgep 2463 * channel The channel to map. 2464 * dma_buf_p 2465 * tx_desc_p channel's descriptor ring 2466 * num_chunks 2467 * 2468 * Notes: 2469 * 2470 * NPI/NXGE function calls: 2471 * nxge_setup_dma_common() 2472 * 2473 * Registers accessed: 2474 * none. 2475 * 2476 * Context: 2477 * Any domain 2478 */ 2479 static nxge_status_t 2480 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2481 p_nxge_dma_common_t *dma_buf_p, 2482 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2483 { 2484 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2485 p_nxge_dma_common_t dmap; 2486 nxge_os_dma_handle_t tx_buf_dma_handle; 2487 p_tx_ring_t tx_ring_p; 2488 p_tx_msg_t tx_msg_ring; 2489 nxge_status_t status = NXGE_OK; 2490 int ddi_status = DDI_SUCCESS; 2491 int i, j, index; 2492 uint32_t size, bsize; 2493 uint32_t nblocks, nmsgs; 2494 2495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2496 "==> nxge_map_txdma_channel_buf_ring")); 2497 2498 dma_bufp = tmp_bufp = *dma_buf_p; 2499 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2500 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2501 "chunks bufp $%p", 2502 channel, num_chunks, dma_bufp)); 2503 2504 nmsgs = 0; 2505 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2506 nmsgs += tmp_bufp->nblocks; 2507 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2508 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2509 "bufp $%p nblocks %d nmsgs %d", 2510 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2511 } 2512 if (!nmsgs) { 2513 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2514 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2515 "no msg blocks", 2516 channel)); 2517 status = NXGE_ERROR; 2518 goto nxge_map_txdma_channel_buf_ring_exit; 2519 } 2520 2521 tx_ring_p = (p_tx_ring_t) 2522 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2523 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2524 (void *)nxgep->interrupt_cookie); 2525 2526 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2527 tx_ring_p->tx_ring_busy = B_FALSE; 2528 tx_ring_p->nxgep = nxgep; 2529 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2530 nxge_serial_tx, tx_ring_p); 2531 /* 2532 * Allocate transmit message rings and handles for packets 2533 * not to be copied to premapped buffers. 
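 * (A DMA handle is pre-allocated below for every descriptor; the editor's
 * reading is that the transmit path binds a handle only when a packet is
 * mapped directly for DVMA rather than bcopy'd into a premapped buffer.)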
2534 */ 2535 size = nmsgs * sizeof (tx_msg_t); 2536 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2537 for (i = 0; i < nmsgs; i++) { 2538 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2539 DDI_DMA_DONTWAIT, 0, 2540 &tx_msg_ring[i].dma_handle); 2541 if (ddi_status != DDI_SUCCESS) { 2542 status |= NXGE_DDI_FAILED; 2543 break; 2544 } 2545 } 2546 if (i < nmsgs) { 2547 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2548 "Allocate handles failed.")); 2549 goto nxge_map_txdma_channel_buf_ring_fail1; 2550 } 2551 2552 tx_ring_p->tdc = channel; 2553 tx_ring_p->tx_msg_ring = tx_msg_ring; 2554 tx_ring_p->tx_ring_size = nmsgs; 2555 tx_ring_p->num_chunks = num_chunks; 2556 if (!nxge_tx_intr_thres) { 2557 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2558 } 2559 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2560 tx_ring_p->rd_index = 0; 2561 tx_ring_p->wr_index = 0; 2562 tx_ring_p->ring_head.value = 0; 2563 tx_ring_p->ring_kick_tail.value = 0; 2564 tx_ring_p->descs_pending = 0; 2565 2566 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2567 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2568 "actual tx desc max %d nmsgs %d " 2569 "(config nxge_tx_ring_size %d)", 2570 channel, tx_ring_p->tx_ring_size, nmsgs, 2571 nxge_tx_ring_size)); 2572 2573 /* 2574 * Map in buffers from the buffer pool. 2575 */ 2576 index = 0; 2577 bsize = dma_bufp->block_size; 2578 2579 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2580 "dma_bufp $%p tx_rng_p $%p " 2581 "tx_msg_rng_p $%p bsize %d", 2582 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2583 2584 tx_buf_dma_handle = dma_bufp->dma_handle; 2585 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2586 bsize = dma_bufp->block_size; 2587 nblocks = dma_bufp->nblocks; 2588 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2589 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2590 "size %d dma_bufp $%p", 2591 i, sizeof (nxge_dma_common_t), dma_bufp)); 2592 2593 for (j = 0; j < nblocks; j++) { 2594 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2595 dmap = &tx_msg_ring[index++].buf_dma; 2596 #ifdef TX_MEM_DEBUG 2597 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2598 "==> nxge_map_txdma_channel_buf_ring: j %d" 2599 "dmap $%p", i, dmap)); 2600 #endif 2601 nxge_setup_dma_common(dmap, dma_bufp, 1, 2602 bsize); 2603 } 2604 } 2605 2606 if (i < num_chunks) { 2607 status = NXGE_ERROR; 2608 goto nxge_map_txdma_channel_buf_ring_fail1; 2609 } 2610 2611 *tx_desc_p = tx_ring_p; 2612 2613 goto nxge_map_txdma_channel_buf_ring_exit; 2614 2615 nxge_map_txdma_channel_buf_ring_fail1: 2616 if (tx_ring_p->serial) { 2617 nxge_serialize_destroy(tx_ring_p->serial); 2618 tx_ring_p->serial = NULL; 2619 } 2620 2621 index--; 2622 for (; index >= 0; index--) { 2623 if (tx_msg_ring[index].dma_handle != NULL) { 2624 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2625 } 2626 } 2627 MUTEX_DESTROY(&tx_ring_p->lock); 2628 KMEM_FREE(tx_msg_ring, size); 2629 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2630 2631 status = NXGE_ERROR; 2632 2633 nxge_map_txdma_channel_buf_ring_exit: 2634 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2635 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2636 2637 return (status); 2638 } 2639 2640 /*ARGSUSED*/ 2641 static void 2642 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2643 { 2644 p_tx_msg_t tx_msg_ring; 2645 p_tx_msg_t tx_msg_p; 2646 int i; 2647 2648 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2649 "==> nxge_unmap_txdma_channel_buf_ring")); 2650 if (tx_ring_p == NULL) { 2651 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2652 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2653 return; 2654 } 2655 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2656 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2657 tx_ring_p->tdc)); 2658 2659 tx_msg_ring = tx_ring_p->tx_msg_ring; 2660 2661 /* 2662 * Since the serialization thread, timer thread and 2663 * interrupt thread can all call the transmit reclaim, 2664 * the unmapping function needs to acquire the lock 2665 * to free those buffers which were transmitted 2666 * by the hardware already. 2667 */ 2668 MUTEX_ENTER(&tx_ring_p->lock); 2669 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2670 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2671 "channel %d", 2672 tx_ring_p->tdc)); 2673 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2674 2675 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2676 tx_msg_p = &tx_msg_ring[i]; 2677 if (tx_msg_p->tx_message != NULL) { 2678 freemsg(tx_msg_p->tx_message); 2679 tx_msg_p->tx_message = NULL; 2680 } 2681 } 2682 2683 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2684 if (tx_msg_ring[i].dma_handle != NULL) { 2685 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2686 } 2687 tx_msg_ring[i].dma_handle = NULL; 2688 } 2689 2690 MUTEX_EXIT(&tx_ring_p->lock); 2691 2692 if (tx_ring_p->serial) { 2693 nxge_serialize_destroy(tx_ring_p->serial); 2694 tx_ring_p->serial = NULL; 2695 } 2696 2697 MUTEX_DESTROY(&tx_ring_p->lock); 2698 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2699 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2700 2701 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2702 "<== nxge_unmap_txdma_channel_buf_ring")); 2703 } 2704 2705 static nxge_status_t 2706 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2707 { 2708 p_tx_rings_t tx_rings; 2709 p_tx_ring_t *tx_desc_rings; 2710 p_tx_mbox_areas_t tx_mbox_areas_p; 2711 p_tx_mbox_t *tx_mbox_p; 2712 nxge_status_t status = NXGE_OK; 2713 2714 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2715 2716 tx_rings = nxgep->tx_rings; 2717 if (tx_rings == NULL) { 2718 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2719 "<== nxge_txdma_hw_start: NULL ring pointer")); 2720 return (NXGE_ERROR); 2721 } 2722 tx_desc_rings = tx_rings->rings; 2723 if (tx_desc_rings == NULL) { 2724 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2725 "<== nxge_txdma_hw_start: NULL ring pointers")); 2726 return (NXGE_ERROR); 2727 } 2728 2729 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2730 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2731 2732 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2733 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2734 2735 status = nxge_txdma_start_channel(nxgep, channel, 2736 (p_tx_ring_t)tx_desc_rings[channel], 2737 (p_tx_mbox_t)tx_mbox_p[channel]); 2738 if (status != NXGE_OK) { 2739 goto nxge_txdma_hw_start_fail1; 2740 } 2741 2742 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2743 "tx_rings $%p rings $%p", 2744 nxgep->tx_rings, nxgep->tx_rings->rings)); 2745 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2746 "tx_rings $%p tx_desc_rings $%p", 2747 nxgep->tx_rings, tx_desc_rings)); 2748 2749 goto nxge_txdma_hw_start_exit; 2750 2751 nxge_txdma_hw_start_fail1: 2752 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2753 "==> nxge_txdma_hw_start: disable " 2754 "(status 0x%x channel %d)", status, channel)); 2755 2756 nxge_txdma_hw_start_exit: 2757 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2758 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2759 2760 return (status); 2761 } 2762 2763 /* 2764 * nxge_txdma_start_channel 2765 * 2766 * Start a TDC. 2767 * 2768 * Arguments: 2769 * nxgep 2770 * channel The channel to start. 
2771 * tx_ring_p channel's transmit descriptor ring. 2772 * tx_mbox_p channel's mailbox. 2773 * 2774 * Notes: 2775 * 2776 * NPI/NXGE function calls: 2777 * nxge_reset_txdma_channel() 2778 * nxge_init_txdma_channel_event_mask() 2779 * nxge_enable_txdma_channel() 2780 * 2781 * Registers accessed: 2782 * none directly (see functions above). 2783 * 2784 * Context: 2785 * Any domain 2786 */ 2787 static nxge_status_t 2788 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2789 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2790 2791 { 2792 nxge_status_t status = NXGE_OK; 2793 2794 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2795 "==> nxge_txdma_start_channel (channel %d)", channel)); 2796 /* 2797 * TXDMA/TXC must be in stopped state. 2798 */ 2799 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2800 2801 /* 2802 * Reset TXDMA channel 2803 */ 2804 tx_ring_p->tx_cs.value = 0; 2805 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2806 status = nxge_reset_txdma_channel(nxgep, channel, 2807 tx_ring_p->tx_cs.value); 2808 if (status != NXGE_OK) { 2809 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2810 "==> nxge_txdma_start_channel (channel %d)" 2811 " reset channel failed 0x%x", channel, status)); 2812 goto nxge_txdma_start_channel_exit; 2813 } 2814 2815 /* 2816 * Initialize the TXDMA channel specific FZC control 2817 * configurations. These FZC registers are pertaining 2818 * to each TX channel (i.e. logical pages). 2819 */ 2820 if (!isLDOMguest(nxgep)) { 2821 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2822 tx_ring_p, tx_mbox_p); 2823 if (status != NXGE_OK) { 2824 goto nxge_txdma_start_channel_exit; 2825 } 2826 } 2827 2828 /* 2829 * Initialize the event masks. 2830 */ 2831 tx_ring_p->tx_evmask.value = 0; 2832 status = nxge_init_txdma_channel_event_mask(nxgep, 2833 channel, &tx_ring_p->tx_evmask); 2834 if (status != NXGE_OK) { 2835 goto nxge_txdma_start_channel_exit; 2836 } 2837 2838 /* 2839 * Load TXDMA descriptors, buffers, mailbox, 2840 * initialise the DMA channels and 2841 * enable each DMA channel. 2842 */ 2843 status = nxge_enable_txdma_channel(nxgep, channel, 2844 tx_ring_p, tx_mbox_p); 2845 if (status != NXGE_OK) { 2846 goto nxge_txdma_start_channel_exit; 2847 } 2848 2849 nxge_txdma_start_channel_exit: 2850 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2851 2852 return (status); 2853 } 2854 2855 /* 2856 * nxge_txdma_stop_channel 2857 * 2858 * Stop a TDC. 2859 * 2860 * Arguments: 2861 * nxgep 2862 * channel The channel to stop. 2863 * tx_ring_p channel's transmit descriptor ring. 2864 * tx_mbox_p channel's mailbox. 2865 * 2866 * Notes: 2867 * 2868 * NPI/NXGE function calls: 2869 * nxge_txdma_stop_inj_err() 2870 * nxge_reset_txdma_channel() 2871 * nxge_init_txdma_channel_event_mask() 2872 * nxge_init_txdma_channel_cntl_stat() 2873 * nxge_disable_txdma_channel() 2874 * 2875 * Registers accessed: 2876 * none directly (see functions above). 2877 * 2878 * Context: 2879 * Any domain 2880 */ 2881 /*ARGSUSED*/ 2882 static nxge_status_t 2883 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2884 { 2885 p_tx_ring_t tx_ring_p; 2886 int status = NXGE_OK; 2887 2888 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2889 "==> nxge_txdma_stop_channel: channel %d", channel)); 2890 2891 /* 2892 * Stop (disable) the TXDMA channel and the TXC. Note that if the 2893 * stop bit is set but the STOP_N_GO bit is not, a subsequent reset 2894 * of the TXDMA channel will not set the reset-state bit.
2895 */ 2896 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2897 2898 tx_ring_p = nxgep->tx_rings->rings[channel]; 2899 2900 /* 2901 * Reset TXDMA channel 2902 */ 2903 tx_ring_p->tx_cs.value = 0; 2904 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2905 status = nxge_reset_txdma_channel(nxgep, channel, 2906 tx_ring_p->tx_cs.value); 2907 if (status != NXGE_OK) { 2908 goto nxge_txdma_stop_channel_exit; 2909 } 2910 2911 #ifdef HARDWARE_REQUIRED 2912 /* Set up the interrupt event masks. */ 2913 tx_ring_p->tx_evmask.value = 0; 2914 status = nxge_init_txdma_channel_event_mask(nxgep, 2915 channel, &tx_ring_p->tx_evmask); 2916 if (status != NXGE_OK) { 2917 goto nxge_txdma_stop_channel_exit; 2918 } 2919 2920 /* Initialize the DMA control and status register */ 2921 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2922 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2923 tx_ring_p->tx_cs.value); 2924 if (status != NXGE_OK) { 2925 goto nxge_txdma_stop_channel_exit; 2926 } 2927 2928 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2929 2930 /* Disable channel */ 2931 status = nxge_disable_txdma_channel(nxgep, channel, 2932 tx_ring_p, tx_mbox_p); 2933 if (status != NXGE_OK) { 2934 goto nxge_txdma_start_channel_exit; 2935 } 2936 2937 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2938 "==> nxge_txdma_stop_channel: event done")); 2939 2940 #endif 2941 2942 nxge_txdma_stop_channel_exit: 2943 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2944 return (status); 2945 } 2946 2947 /* 2948 * nxge_txdma_get_ring 2949 * 2950 * Get the ring for a TDC. 2951 * 2952 * Arguments: 2953 * nxgep 2954 * channel 2955 * 2956 * Notes: 2957 * 2958 * NPI/NXGE function calls: 2959 * 2960 * Registers accessed: 2961 * 2962 * Context: 2963 * Any domain 2964 */ 2965 static p_tx_ring_t 2966 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2967 { 2968 nxge_grp_set_t *set = &nxgep->tx_set; 2969 int tdc; 2970 2971 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2972 2973 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2974 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2975 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 2976 goto return_null; 2977 } 2978 2979 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2980 if ((1 << tdc) & set->owned.map) { 2981 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2982 if (ring) { 2983 if (channel == ring->tdc) { 2984 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2985 "<== nxge_txdma_get_ring: " 2986 "tdc %d ring $%p", tdc, ring)); 2987 return (ring); 2988 } 2989 } 2990 } 2991 } 2992 2993 return_null: 2994 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 2995 "ring not found")); 2996 2997 return (NULL); 2998 } 2999 3000 /* 3001 * nxge_txdma_get_mbox 3002 * 3003 * Get the mailbox for a TDC. 
3004 * 3005 * Arguments: 3006 * nxgep 3007 * channel 3008 * 3009 * Notes: 3010 * 3011 * NPI/NXGE function calls: 3012 * 3013 * Registers accessed: 3014 * 3015 * Context: 3016 * Any domain 3017 */ 3018 static p_tx_mbox_t 3019 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3020 { 3021 nxge_grp_set_t *set = &nxgep->tx_set; 3022 int tdc; 3023 3024 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3025 3026 if (nxgep->tx_mbox_areas_p == 0 || 3027 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3028 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3029 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3030 goto return_null; 3031 } 3032 3033 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3034 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3035 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3036 goto return_null; 3037 } 3038 3039 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3040 if ((1 << tdc) & set->owned.map) { 3041 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3042 if (ring) { 3043 if (channel == ring->tdc) { 3044 tx_mbox_t *mailbox = nxgep-> 3045 tx_mbox_areas_p-> 3046 txmbox_areas_p[tdc]; 3047 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3048 "<== nxge_txdma_get_mbox: tdc %d " 3049 "ring $%p", tdc, mailbox)); 3050 return (mailbox); 3051 } 3052 } 3053 } 3054 } 3055 3056 return_null: 3057 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3058 "mailbox not found")); 3059 3060 return (NULL); 3061 } 3062 3063 /* 3064 * nxge_tx_err_evnts 3065 * 3066 * Recover a TDC. 3067 * 3068 * Arguments: 3069 * nxgep 3070 * index The index to the TDC ring. 3071 * ldvp Used to get the channel number ONLY. 3072 * cs A copy of the bits from TX_CS. 3073 * 3074 * Notes: 3075 * Calling tree: 3076 * nxge_tx_intr() 3077 * 3078 * NPI/NXGE function calls: 3079 * npi_txdma_ring_error_get() 3080 * npi_txdma_inj_par_error_get() 3081 * nxge_txdma_fatal_err_recover() 3082 * 3083 * Registers accessed: 3084 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3085 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3086 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3087 * 3088 * Context: 3089 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
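 *
 * (Each error bit handled below increments the matching TDC statistic,
 * posts an FM ereport and, when the error is fatal, triggers
 * nxge_txdma_fatal_err_recover(); FM_SERVICE_RESTORED() is reported if
 * that recovery succeeds.)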
3090 */ 3091 /*ARGSUSED*/ 3092 static nxge_status_t 3093 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3094 { 3095 npi_handle_t handle; 3096 npi_status_t rs; 3097 uint8_t channel; 3098 p_tx_ring_t *tx_rings; 3099 p_tx_ring_t tx_ring_p; 3100 p_nxge_tx_ring_stats_t tdc_stats; 3101 boolean_t txchan_fatal = B_FALSE; 3102 nxge_status_t status = NXGE_OK; 3103 tdmc_inj_par_err_t par_err; 3104 uint32_t value; 3105 3106 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3107 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3108 channel = ldvp->channel; 3109 3110 tx_rings = nxgep->tx_rings->rings; 3111 tx_ring_p = tx_rings[index]; 3112 tdc_stats = tx_ring_p->tdc_stats; 3113 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3114 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3115 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3116 if ((rs = npi_txdma_ring_error_get(handle, channel, 3117 &tdc_stats->errlog)) != NPI_SUCCESS) 3118 return (NXGE_ERROR | rs); 3119 } 3120 3121 if (cs.bits.ldw.mbox_err) { 3122 tdc_stats->mbox_err++; 3123 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3124 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3125 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3126 "==> nxge_tx_err_evnts(channel %d): " 3127 "fatal error: mailbox", channel)); 3128 txchan_fatal = B_TRUE; 3129 } 3130 if (cs.bits.ldw.pkt_size_err) { 3131 tdc_stats->pkt_size_err++; 3132 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3133 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3134 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3135 "==> nxge_tx_err_evnts(channel %d): " 3136 "fatal error: pkt_size_err", channel)); 3137 txchan_fatal = B_TRUE; 3138 } 3139 if (cs.bits.ldw.tx_ring_oflow) { 3140 tdc_stats->tx_ring_oflow++; 3141 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3142 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3144 "==> nxge_tx_err_evnts(channel %d): " 3145 "fatal error: tx_ring_oflow", channel)); 3146 txchan_fatal = B_TRUE; 3147 } 3148 if (cs.bits.ldw.pref_buf_par_err) { 3149 tdc_stats->pre_buf_par_err++; 3150 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3151 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3153 "==> nxge_tx_err_evnts(channel %d): " 3154 "fatal error: pre_buf_par_err", channel)); 3155 /* Clear error injection source for parity error */ 3156 (void) npi_txdma_inj_par_error_get(handle, &value); 3157 par_err.value = value; 3158 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3159 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3160 txchan_fatal = B_TRUE; 3161 } 3162 if (cs.bits.ldw.nack_pref) { 3163 tdc_stats->nack_pref++; 3164 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3165 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3167 "==> nxge_tx_err_evnts(channel %d): " 3168 "fatal error: nack_pref", channel)); 3169 txchan_fatal = B_TRUE; 3170 } 3171 if (cs.bits.ldw.nack_pkt_rd) { 3172 tdc_stats->nack_pkt_rd++; 3173 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3174 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3175 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3176 "==> nxge_tx_err_evnts(channel %d): " 3177 "fatal error: nack_pkt_rd", channel)); 3178 txchan_fatal = B_TRUE; 3179 } 3180 if (cs.bits.ldw.conf_part_err) { 3181 tdc_stats->conf_part_err++; 3182 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3183 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3185 "==> 
nxge_tx_err_evnts(channel %d): " 3186 "fatal error: config_partition_err", channel)); 3187 txchan_fatal = B_TRUE; 3188 } 3189 if (cs.bits.ldw.pkt_prt_err) { 3190 tdc_stats->pkt_part_err++; 3191 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3192 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3193 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3194 "==> nxge_tx_err_evnts(channel %d): " 3195 "fatal error: pkt_prt_err", channel)); 3196 txchan_fatal = B_TRUE; 3197 } 3198 3199 /* Clear error injection source in case this is an injected error */ 3200 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3201 3202 if (txchan_fatal) { 3203 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3204 " nxge_tx_err_evnts: " 3205 " fatal error on channel %d cs 0x%llx\n", 3206 channel, cs.value)); 3207 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3208 tx_ring_p); 3209 if (status == NXGE_OK) { 3210 FM_SERVICE_RESTORED(nxgep); 3211 } 3212 } 3213 3214 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3215 3216 return (status); 3217 } 3218 3219 static nxge_status_t 3220 nxge_txdma_fatal_err_recover( 3221 p_nxge_t nxgep, 3222 uint16_t channel, 3223 p_tx_ring_t tx_ring_p) 3224 { 3225 npi_handle_t handle; 3226 npi_status_t rs = NPI_SUCCESS; 3227 p_tx_mbox_t tx_mbox_p; 3228 nxge_status_t status = NXGE_OK; 3229 3230 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3232 "Recovering from TxDMAChannel#%d error...", channel)); 3233 3234 /* 3235 * Stop the DMA channel and wait for the stop-done indication. 3236 * If the stop-done bit does not get set, report 3237 * an error. 3238 */ 3239 3240 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3241 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3242 MUTEX_ENTER(&tx_ring_p->lock); 3243 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3244 if (rs != NPI_SUCCESS) { 3245 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3246 "==> nxge_txdma_fatal_err_recover (channel %d): " 3247 "stop failed ", channel)); 3248 goto fail; 3249 } 3250 3251 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3252 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3253 3254 /* 3255 * Reset TXDMA channel 3256 */ 3257 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3258 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3259 NPI_SUCCESS) { 3260 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3261 "==> nxge_txdma_fatal_err_recover (channel %d)" 3262 " reset channel failed 0x%x", channel, rs)); 3263 goto fail; 3264 } 3265 3266 /* 3267 * Reset the tail (kick) register to 0. 3268 * (Hardware will not reset it. A Tx overflow fatal 3269 * error results if the tail is not 0 after the reset.) 3270 */ 3271 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3272 3273 /* Restart TXDMA channel */ 3274 3275 if (!isLDOMguest(nxgep)) { 3276 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3277 3278 // XXX This is a problem in HIO! 3279 /* 3280 * Initialize the TXDMA channel specific FZC control 3281 * configurations. These FZC registers are pertaining 3282 * to each TX channel (i.e. logical pages). 3283 */ 3284 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3285 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3286 tx_ring_p, tx_mbox_p); 3287 if (status != NXGE_OK) 3288 goto fail; 3289 } 3290 3291 /* 3292 * Initialize the event masks.
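 * (The same zeroed mask is programmed during a normal channel start;
 * see nxge_txdma_start_channel() above.)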
3293 */ 3294 tx_ring_p->tx_evmask.value = 0; 3295 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3296 &tx_ring_p->tx_evmask); 3297 if (status != NXGE_OK) 3298 goto fail; 3299 3300 tx_ring_p->wr_index_wrap = B_FALSE; 3301 tx_ring_p->wr_index = 0; 3302 tx_ring_p->rd_index = 0; 3303 3304 /* 3305 * Load TXDMA descriptors, buffers, mailbox, 3306 * initialise the DMA channels and 3307 * enable each DMA channel. 3308 */ 3309 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3310 status = nxge_enable_txdma_channel(nxgep, channel, 3311 tx_ring_p, tx_mbox_p); 3312 MUTEX_EXIT(&tx_ring_p->lock); 3313 if (status != NXGE_OK) 3314 goto fail; 3315 3316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3317 "Recovery Successful, TxDMAChannel#%d Restored", 3318 channel)); 3319 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3320 3321 return (NXGE_OK); 3322 3323 fail: 3324 MUTEX_EXIT(&tx_ring_p->lock); 3325 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3326 "nxge_txdma_fatal_err_recover (channel %d): " 3327 "failed to recover this txdma channel", channel)); 3328 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3329 3330 return (status); 3331 } 3332 3333 /* 3334 * nxge_tx_port_fatal_err_recover 3335 * 3336 * Attempt to recover from a fatal port error. 3337 * 3338 * Arguments: 3339 * nxgep 3340 * 3341 * Notes: 3342 * How would a guest do this? 3343 * 3344 * NPI/NXGE function calls: 3345 * 3346 * Registers accessed: 3347 * 3348 * Context: 3349 * Service domain 3350 */ 3351 nxge_status_t 3352 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3353 { 3354 nxge_grp_set_t *set = &nxgep->tx_set; 3355 nxge_channel_t tdc; 3356 3357 tx_ring_t *ring; 3358 tx_mbox_t *mailbox; 3359 3360 npi_handle_t handle; 3361 nxge_status_t status; 3362 npi_status_t rs; 3363 3364 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3365 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3366 "Recovering from TxPort error...")); 3367 3368 if (isLDOMguest(nxgep)) { 3369 return (NXGE_OK); 3370 } 3371 3372 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3373 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3374 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3375 return (NXGE_ERROR); 3376 } 3377 3378 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3379 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3380 "<== nxge_tx_port_fatal_err_recover: " 3381 "NULL ring pointer(s)")); 3382 return (NXGE_ERROR); 3383 } 3384 3385 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3386 if ((1 << tdc) & set->owned.map) { 3387 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3388 if (ring) 3389 MUTEX_ENTER(&ring->lock); 3390 } 3391 } 3392 3393 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3394 3395 /* 3396 * Stop all the TDCs owned by us. 3397 * (The shared TDCs will have been stopped by their owners.) 3398 */ 3399 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3400 if ((1 << tdc) & set->owned.map) { 3401 ring = nxgep->tx_rings->rings[tdc]; 3402 if (ring) { 3403 rs = npi_txdma_channel_control 3404 (handle, TXDMA_STOP, tdc); 3405 if (rs != NPI_SUCCESS) { 3406 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3407 "nxge_tx_port_fatal_err_recover " 3408 "(channel %d): stop failed ", tdc)); 3409 goto fail; 3410 } 3411 } 3412 } 3413 } 3414 3415 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3416 3417 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3418 if ((1 << tdc) & set->owned.map) { 3419 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3420 if (ring) 3421 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3422 } 3423 } 3424 3425 /* 3426 * Reset all the TDCs. 
3427 */ 3428 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3429 3430 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3431 if ((1 << tdc) & set->owned.map) { 3432 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3433 if (ring) { 3434 if ((rs = npi_txdma_channel_control 3435 (handle, TXDMA_RESET, tdc)) 3436 != NPI_SUCCESS) { 3437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3438 "nxge_tx_port_fatal_err_recover " 3439 "(channel %d) reset channel " 3440 "failed 0x%x", tdc, rs)); 3441 goto fail; 3442 } 3443 } 3444 /* 3445 * Reset the tail (kick) register to 0. 3446 * (Hardware will not reset it. Tx overflow fatal 3447 * error if tail is not set to 0 after reset! 3448 */ 3449 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3450 } 3451 } 3452 3453 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3454 3455 /* Restart all the TDCs */ 3456 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3457 if ((1 << tdc) & set->owned.map) { 3458 ring = nxgep->tx_rings->rings[tdc]; 3459 if (ring) { 3460 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3461 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3462 ring, mailbox); 3463 ring->tx_evmask.value = 0; 3464 /* 3465 * Initialize the event masks. 3466 */ 3467 status = nxge_init_txdma_channel_event_mask 3468 (nxgep, tdc, &ring->tx_evmask); 3469 3470 ring->wr_index_wrap = B_FALSE; 3471 ring->wr_index = 0; 3472 ring->rd_index = 0; 3473 3474 if (status != NXGE_OK) 3475 goto fail; 3476 if (status != NXGE_OK) 3477 goto fail; 3478 } 3479 } 3480 } 3481 3482 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3483 3484 /* Re-enable all the TDCs */ 3485 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3486 if ((1 << tdc) & set->owned.map) { 3487 ring = nxgep->tx_rings->rings[tdc]; 3488 if (ring) { 3489 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3490 status = nxge_enable_txdma_channel(nxgep, tdc, 3491 ring, mailbox); 3492 if (status != NXGE_OK) 3493 goto fail; 3494 } 3495 } 3496 } 3497 3498 /* 3499 * Unlock all the TDCs. 3500 */ 3501 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3502 if ((1 << tdc) & set->owned.map) { 3503 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3504 if (ring) 3505 MUTEX_EXIT(&ring->lock); 3506 } 3507 } 3508 3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3510 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3511 3512 return (NXGE_OK); 3513 3514 fail: 3515 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3516 if ((1 << tdc) & set->owned.map) { 3517 ring = nxgep->tx_rings->rings[tdc]; 3518 if (ring) 3519 MUTEX_EXIT(&ring->lock); 3520 } 3521 } 3522 3523 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3524 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3525 3526 return (status); 3527 } 3528 3529 /* 3530 * nxge_txdma_inject_err 3531 * 3532 * Inject an error into a TDC. 3533 * 3534 * Arguments: 3535 * nxgep 3536 * err_id The error to inject. 3537 * chan The channel to inject into. 3538 * 3539 * Notes: 3540 * This is called from nxge_main.c:nxge_err_inject() 3541 * Has this ioctl ever been used? 
3542 * 3543 * NPI/NXGE function calls: 3544 * npi_txdma_inj_par_error_get() 3545 * npi_txdma_inj_par_error_set() 3546 * 3547 * Registers accessed: 3548 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3549 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3550 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3551 * 3552 * Context: 3553 * Service domain 3554 */ 3555 void 3556 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3557 { 3558 tdmc_intr_dbg_t tdi; 3559 tdmc_inj_par_err_t par_err; 3560 uint32_t value; 3561 npi_handle_t handle; 3562 3563 switch (err_id) { 3564 3565 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3566 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3567 /* Clear error injection source for parity error */ 3568 (void) npi_txdma_inj_par_error_get(handle, &value); 3569 par_err.value = value; 3570 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3571 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3572 3573 par_err.bits.ldw.inject_parity_error = (1 << chan); 3574 (void) npi_txdma_inj_par_error_get(handle, &value); 3575 par_err.value = value; 3576 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3577 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3578 (unsigned long long)par_err.value); 3579 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3580 break; 3581 3582 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3583 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3584 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3585 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3586 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3587 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3588 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3589 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3590 chan, &tdi.value); 3591 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3592 tdi.bits.ldw.pref_buf_par_err = 1; 3593 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3594 tdi.bits.ldw.mbox_err = 1; 3595 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3596 tdi.bits.ldw.nack_pref = 1; 3597 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3598 tdi.bits.ldw.nack_pkt_rd = 1; 3599 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3600 tdi.bits.ldw.pkt_size_err = 1; 3601 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3602 tdi.bits.ldw.tx_ring_oflow = 1; 3603 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3604 tdi.bits.ldw.conf_part_err = 1; 3605 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3606 tdi.bits.ldw.pkt_part_err = 1; 3607 #if defined(__i386) 3608 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3609 tdi.value); 3610 #else 3611 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3612 tdi.value); 3613 #endif 3614 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3615 chan, tdi.value); 3616 3617 break; 3618 } 3619 } 3620
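/*
 * Example (editor's sketch, not part of the driver): how an injected
 * error is expected to flow through the functions in this file. The
 * ioctl plumbing in nxge_main.c:nxge_err_inject() is assumed.
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_MBOX_ERR, 0);
 *
 * The write to TDMC_INTR_DBG should cause the TDC to raise the mbox_err
 * bit in TX_CS; nxge_tx_intr() then calls nxge_tx_err_evnts(), which
 * logs the error, runs nxge_txdma_fatal_err_recover() and, on success,
 * reports FM_SERVICE_RESTORED().
 */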