1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_txdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 #include <npi_tx_rd64.h> 30 #include <npi_tx_wr64.h> 31 #include <sys/llc1.h> 32 33 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 34 uint32_t nxge_tx_minfree = 32; 35 uint32_t nxge_tx_intr_thres = 0; 36 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 37 uint32_t nxge_tx_tiny_pack = 1; 38 uint32_t nxge_tx_use_bcopy = 1; 39 40 extern uint32_t nxge_tx_ring_size; 41 extern uint32_t nxge_bcopy_thresh; 42 extern uint32_t nxge_dvma_thresh; 43 extern uint32_t nxge_dma_stream_thresh; 44 extern dma_method_t nxge_force_dma; 45 extern uint32_t nxge_cksum_offload; 46 47 /* Device register access attributes for PIO. */ 48 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 49 /* Device descriptor access attributes for DMA. */ 50 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 51 /* Device buffer access attributes for DMA. 
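 *
 * The three ddi_device_acc_attr_t objects above and below are defined
 * elsewhere in the driver and only referenced here.  As a rough
 * illustration (generic DDI settings, not necessarily the values this
 * driver uses), such an attribute is built along these lines and then
 * passed to ddi_dma_mem_alloc():
 *
 *	ddi_device_acc_attr_t attr;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;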
*/ 52 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 53 extern ddi_dma_attr_t nxge_desc_dma_attr; 54 extern ddi_dma_attr_t nxge_tx_dma_attr; 55 56 extern int nxge_serial_tx(mblk_t *mp, void *arg); 57 58 static nxge_status_t nxge_map_txdma(p_nxge_t, int); 59 60 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 61 62 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 63 p_nxge_dma_common_t *, p_tx_ring_t *, 64 uint32_t, p_nxge_dma_common_t *, 65 p_tx_mbox_t *); 66 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 67 68 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 69 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 70 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 71 72 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 73 p_nxge_dma_common_t *, p_tx_ring_t, 74 p_tx_mbox_t *); 75 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 76 p_tx_ring_t, p_tx_mbox_t); 77 78 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 79 p_tx_ring_t, p_tx_mbox_t); 80 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 81 82 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 83 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 84 p_nxge_ldv_t, tx_cs_t); 85 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 86 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 87 uint16_t, p_tx_ring_t); 88 89 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 90 p_tx_ring_t ring_p, uint16_t channel); 91 92 nxge_status_t 93 nxge_init_txdma_channels(p_nxge_t nxgep) 94 { 95 nxge_grp_set_t *set = &nxgep->tx_set; 96 int i, count; 97 98 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 99 100 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 101 if ((1 << i) & set->lg.map) { 102 int tdc; 103 nxge_grp_t *group = set->group[i]; 104 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 105 if ((1 << tdc) & group->map) { 106 if ((nxge_grp_dc_add(nxgep, 107 group, VP_BOUND_TX, tdc))) 108 return (NXGE_ERROR); 109 } 110 } 111 } 112 if (++count == set->lg.count) 113 break; 114 } 115 116 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 117 118 return (NXGE_OK); 119 } 120 121 nxge_status_t 122 nxge_init_txdma_channel( 123 p_nxge_t nxge, 124 int channel) 125 { 126 nxge_status_t status; 127 128 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 129 130 status = nxge_map_txdma(nxge, channel); 131 if (status != NXGE_OK) { 132 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 133 "<== nxge_init_txdma_channel: status 0x%x", status)); 134 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 135 return (status); 136 } 137 138 status = nxge_txdma_hw_start(nxge, channel); 139 if (status != NXGE_OK) { 140 (void) nxge_unmap_txdma_channel(nxge, channel); 141 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 142 return (status); 143 } 144 145 if (!nxge->statsp->tdc_ksp[channel]) 146 nxge_setup_tdc_kstats(nxge, channel); 147 148 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 149 150 return (status); 151 } 152 153 void 154 nxge_uninit_txdma_channels(p_nxge_t nxgep) 155 { 156 nxge_grp_set_t *set = &nxgep->tx_set; 157 int tdc; 158 159 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 160 161 if (set->owned.map == 0) { 162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 163 "nxge_uninit_txdma_channels: no channels")); 164 return; 165 } 166 167 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 168 if ((1 << tdc) & set->owned.map) 
{ 169 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 170 } 171 } 172 173 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 174 } 175 176 void 177 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 178 { 179 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 180 181 if (nxgep->statsp->tdc_ksp[channel]) { 182 kstat_delete(nxgep->statsp->tdc_ksp[channel]); 183 nxgep->statsp->tdc_ksp[channel] = 0; 184 } 185 186 (void) nxge_txdma_stop_channel(nxgep, channel); 187 nxge_unmap_txdma_channel(nxgep, channel); 188 189 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 190 "<== nxge_uninit_txdma_channel")); 191 } 192 193 void 194 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 195 uint32_t entries, uint32_t size) 196 { 197 size_t tsize; 198 *dest_p = *src_p; 199 tsize = size * entries; 200 dest_p->alength = tsize; 201 dest_p->nblocks = entries; 202 dest_p->block_size = size; 203 dest_p->offset += tsize; 204 205 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 206 src_p->alength -= tsize; 207 src_p->dma_cookie.dmac_laddress += tsize; 208 src_p->dma_cookie.dmac_size -= tsize; 209 } 210 211 /* 212 * nxge_reset_txdma_channel 213 * 214 * Reset a TDC. 215 * 216 * Arguments: 217 * nxgep 218 * channel The channel to reset. 219 * reg_data The current TX_CS. 220 * 221 * Notes: 222 * 223 * NPI/NXGE function calls: 224 * npi_txdma_channel_reset() 225 * npi_txdma_channel_control() 226 * 227 * Registers accessed: 228 * TX_CS DMC+0x40028 Transmit Control And Status 229 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 230 * 231 * Context: 232 * Any domain 233 */ 234 nxge_status_t 235 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 236 { 237 npi_status_t rs = NPI_SUCCESS; 238 nxge_status_t status = NXGE_OK; 239 npi_handle_t handle; 240 241 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 242 243 handle = NXGE_DEV_NPI_HANDLE(nxgep); 244 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 245 rs = npi_txdma_channel_reset(handle, channel); 246 } else { 247 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 248 channel); 249 } 250 251 if (rs != NPI_SUCCESS) { 252 status = NXGE_ERROR | rs; 253 } 254 255 /* 256 * Reset the tail (kick) register to 0. 257 * (Hardware will not reset it. Tx overflow fatal 258 * error if tail is not set to 0 after reset! 259 */ 260 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 261 262 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 263 return (status); 264 } 265 266 /* 267 * nxge_init_txdma_channel_event_mask 268 * 269 * Enable interrupts for a set of events. 270 * 271 * Arguments: 272 * nxgep 273 * channel The channel to map. 274 * mask_p The events to enable. 275 * 276 * Notes: 277 * 278 * NPI/NXGE function calls: 279 * npi_txdma_event_mask() 280 * 281 * Registers accessed: 282 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 283 * 284 * Context: 285 * Any domain 286 */ 287 nxge_status_t 288 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 289 p_tx_dma_ent_msk_t mask_p) 290 { 291 npi_handle_t handle; 292 npi_status_t rs = NPI_SUCCESS; 293 nxge_status_t status = NXGE_OK; 294 295 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 296 "<== nxge_init_txdma_channel_event_mask")); 297 298 handle = NXGE_DEV_NPI_HANDLE(nxgep); 299 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 300 if (rs != NPI_SUCCESS) { 301 status = NXGE_ERROR | rs; 302 } 303 304 return (status); 305 } 306 307 /* 308 * nxge_init_txdma_channel_cntl_stat 309 * 310 * Stop a TDC. 
More precisely, this writes the caller-supplied reg_data value to the channel's TX_CS register. 311 * 312 * Arguments: 313 * nxgep 314 * channel The channel to stop. 315 * 316 * Notes: 317 * 318 * NPI/NXGE function calls: 319 * npi_txdma_control_status() 320 * 321 * Registers accessed: 322 * TX_CS DMC+0x40028 Transmit Control And Status 323 * 324 * Context: 325 * Any domain 326 */ 327 nxge_status_t 328 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 329 uint64_t reg_data) 330 { 331 npi_handle_t handle; 332 npi_status_t rs = NPI_SUCCESS; 333 nxge_status_t status = NXGE_OK; 334 335 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 336 "<== nxge_init_txdma_channel_cntl_stat")); 337 338 handle = NXGE_DEV_NPI_HANDLE(nxgep); 339 rs = npi_txdma_control_status(handle, OP_SET, channel, 340 (p_tx_cs_t)&reg_data); 341 342 if (rs != NPI_SUCCESS) { 343 status = NXGE_ERROR | rs; 344 } 345 346 return (status); 347 } 348 349 /* 350 * nxge_enable_txdma_channel 351 * 352 * Enable a TDC. 353 * 354 * Arguments: 355 * nxgep 356 * channel The channel to enable. 357 * tx_desc_p channel's transmit descriptor ring. 358 * mbox_p channel's mailbox. 359 * 360 * Notes: 361 * 362 * NPI/NXGE function calls: 363 * npi_txdma_ring_config() 364 * npi_txdma_mbox_config() 365 * npi_txdma_channel_init_enable() 366 * 367 * Registers accessed: 368 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 369 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 370 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 371 * TX_CS DMC+0x40028 Transmit Control And Status 372 * 373 * Context: 374 * Any domain 375 */ 376 nxge_status_t 377 nxge_enable_txdma_channel(p_nxge_t nxgep, 378 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 379 { 380 npi_handle_t handle; 381 npi_status_t rs = NPI_SUCCESS; 382 nxge_status_t status = NXGE_OK; 383 384 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 385 386 handle = NXGE_DEV_NPI_HANDLE(nxgep); 387 /* 388 * Use configuration data composed at init time. 389 * Write to hardware the transmit ring configurations. 390 */ 391 rs = npi_txdma_ring_config(handle, OP_SET, channel, 392 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 393 394 if (rs != NPI_SUCCESS) { 395 return (NXGE_ERROR | rs); 396 } 397 398 if (isLDOMguest(nxgep)) { 399 /* Add interrupt handler for this channel. */ 400 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK) 401 return (NXGE_ERROR); 402 } 403 404 /* Write to hardware the mailbox */ 405 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 406 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 407 408 if (rs != NPI_SUCCESS) { 409 return (NXGE_ERROR | rs); 410 } 411 412 /* Start the DMA engine.
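 *
 * With the ring configuration and mailbox address already written
 * above, the whole bring-up of a channel reduces to the following
 * sketch (error handling omitted; cfig_value and mbox_pa are just
 * placeholder names for the values used above):
 *
 *	rs = npi_txdma_ring_config(handle, OP_SET, channel, &cfig_value);
 *	rs = npi_txdma_mbox_config(handle, OP_SET, channel, &mbox_pa);
 *	rs = npi_txdma_channel_init_enable(handle, channel);
 *
 * The last call is what actually starts the engine, by writing the
 * channel's TX_CS register.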
*/ 413 rs = npi_txdma_channel_init_enable(handle, channel); 414 415 if (rs != NPI_SUCCESS) { 416 return (NXGE_ERROR | rs); 417 } 418 419 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 420 421 return (status); 422 } 423 424 void 425 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 426 boolean_t l4_cksum, int pkt_len, uint8_t npads, 427 p_tx_pkt_hdr_all_t pkthdrp, 428 t_uscalar_t start_offset, 429 t_uscalar_t stuff_offset) 430 { 431 p_tx_pkt_header_t hdrp; 432 p_mblk_t nmp; 433 uint64_t tmp; 434 size_t mblk_len; 435 size_t iph_len; 436 size_t hdrs_size; 437 uint8_t hdrs_buf[sizeof (struct ether_header) + 438 64 + sizeof (uint32_t)]; 439 uint8_t *cursor; 440 uint8_t *ip_buf; 441 uint16_t eth_type; 442 uint8_t ipproto; 443 boolean_t is_vlan = B_FALSE; 444 size_t eth_hdr_size; 445 446 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 447 448 /* 449 * Caller should zero out the headers first. 450 */ 451 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 452 453 if (fill_len) { 454 NXGE_DEBUG_MSG((NULL, TX_CTL, 455 "==> nxge_fill_tx_hdr: pkt_len %d " 456 "npads %d", pkt_len, npads)); 457 tmp = (uint64_t)pkt_len; 458 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 459 goto fill_tx_header_done; 460 } 461 462 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 463 464 /* 465 * mp is the original data packet (does not include the 466 * Neptune transmit header). 467 */ 468 nmp = mp; 469 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 470 "mp $%p b_rptr $%p len %d", 471 mp, nmp->b_rptr, MBLKL(nmp))); 472 /* copy ether_header from mblk to hdrs_buf */ 473 cursor = &hdrs_buf[0]; 474 tmp = sizeof (struct ether_vlan_header); 475 while ((nmp != NULL) && (tmp > 0)) { 476 size_t buflen; 477 mblk_len = MBLKL(nmp); 478 buflen = min((size_t)tmp, mblk_len); 479 bcopy(nmp->b_rptr, cursor, buflen); 480 cursor += buflen; 481 tmp -= buflen; 482 nmp = nmp->b_cont; 483 } 484 485 nmp = mp; 486 mblk_len = MBLKL(nmp); 487 ip_buf = NULL; 488 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 489 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 490 "ether type 0x%x", eth_type, hdrp->value)); 491 492 if (eth_type < ETHERMTU) { 493 tmp = 1ull; 494 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 495 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 496 "value 0x%llx", hdrp->value)); 497 if (*(hdrs_buf + sizeof (struct ether_header)) 498 == LLC_SNAP_SAP) { 499 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 500 sizeof (struct ether_header) + 6))); 501 NXGE_DEBUG_MSG((NULL, TX_CTL, 502 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 503 eth_type)); 504 } else { 505 goto fill_tx_header_done; 506 } 507 } else if (eth_type == VLAN_ETHERTYPE) { 508 tmp = 1ull; 509 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 510 511 eth_type = ntohs(((struct ether_vlan_header *) 512 hdrs_buf)->ether_type); 513 is_vlan = B_TRUE; 514 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 515 "value 0x%llx", hdrp->value)); 516 } 517 518 if (!is_vlan) { 519 eth_hdr_size = sizeof (struct ether_header); 520 } else { 521 eth_hdr_size = sizeof (struct ether_vlan_header); 522 } 523 524 switch (eth_type) { 525 case ETHERTYPE_IP: 526 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 527 ip_buf = nmp->b_rptr + eth_hdr_size; 528 mblk_len -= eth_hdr_size; 529 iph_len = ((*ip_buf) & 0x0f); 530 if (mblk_len > (iph_len + sizeof (uint32_t))) { 531 ip_buf = nmp->b_rptr; 532 ip_buf += eth_hdr_size; 533 } else { 534 ip_buf = NULL; 535 } 536 537 } 538 if 
(ip_buf == NULL) { 539 hdrs_size = 0; 540 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 541 while ((nmp) && (hdrs_size < 542 sizeof (hdrs_buf))) { 543 mblk_len = (size_t)nmp->b_wptr - 544 (size_t)nmp->b_rptr; 545 if (mblk_len >= 546 (sizeof (hdrs_buf) - hdrs_size)) 547 mblk_len = sizeof (hdrs_buf) - 548 hdrs_size; 549 bcopy(nmp->b_rptr, 550 &hdrs_buf[hdrs_size], mblk_len); 551 hdrs_size += mblk_len; 552 nmp = nmp->b_cont; 553 } 554 ip_buf = hdrs_buf; 555 ip_buf += eth_hdr_size; 556 iph_len = ((*ip_buf) & 0x0f); 557 } 558 559 ipproto = ip_buf[9]; 560 561 tmp = (uint64_t)iph_len; 562 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 563 tmp = (uint64_t)(eth_hdr_size >> 1); 564 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 565 566 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 567 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 568 "tmp 0x%x", 569 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 570 ipproto, tmp)); 571 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 572 "value 0x%llx", hdrp->value)); 573 574 break; 575 576 case ETHERTYPE_IPV6: 577 hdrs_size = 0; 578 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 579 while ((nmp) && (hdrs_size < 580 sizeof (hdrs_buf))) { 581 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 582 if (mblk_len >= 583 (sizeof (hdrs_buf) - hdrs_size)) 584 mblk_len = sizeof (hdrs_buf) - 585 hdrs_size; 586 bcopy(nmp->b_rptr, 587 &hdrs_buf[hdrs_size], mblk_len); 588 hdrs_size += mblk_len; 589 nmp = nmp->b_cont; 590 } 591 ip_buf = hdrs_buf; 592 ip_buf += eth_hdr_size; 593 594 tmp = 1ull; 595 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 596 597 tmp = (eth_hdr_size >> 1); 598 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 599 600 /* byte 6 is the next header protocol */ 601 ipproto = ip_buf[6]; 602 603 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 604 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 605 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 606 ipproto)); 607 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 608 "value 0x%llx", hdrp->value)); 609 610 break; 611 612 default: 613 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 614 goto fill_tx_header_done; 615 } 616 617 switch (ipproto) { 618 case IPPROTO_TCP: 619 NXGE_DEBUG_MSG((NULL, TX_CTL, 620 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 621 if (l4_cksum) { 622 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 623 hdrp->value |= 624 (((uint64_t)(start_offset >> 1)) << 625 TX_PKT_HEADER_L4START_SHIFT); 626 hdrp->value |= 627 (((uint64_t)(stuff_offset >> 1)) << 628 TX_PKT_HEADER_L4STUFF_SHIFT); 629 630 NXGE_DEBUG_MSG((NULL, TX_CTL, 631 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 632 "value 0x%llx", hdrp->value)); 633 } 634 635 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 636 "value 0x%llx", hdrp->value)); 637 break; 638 639 case IPPROTO_UDP: 640 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 641 if (l4_cksum) { 642 if (!nxge_cksum_offload) { 643 uint16_t *up; 644 uint16_t cksum; 645 t_uscalar_t stuff_len; 646 647 /* 648 * The checksum field has the 649 * partial checksum. 650 * IP_CSUM() macro calls ip_cksum() which 651 * can add in the partial checksum. 
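 * In other words, the driver completes the checksum in software here:
 * IP_CSUM() folds the partial sum over the payload beginning at
 * start_offset, and the result is written back into the packet at
 * stuff_offset (the byte offset of the UDP checksum field).  These are
 * the same start/stuff offsets that the hardware-assisted path below
 * passes to the chip, converted there to 16-bit-word offsets by
 * shifting right by one.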
652 */ 653 cksum = IP_CSUM(mp, start_offset, 0); 654 stuff_len = stuff_offset; 655 nmp = mp; 656 mblk_len = MBLKL(nmp); 657 while ((nmp != NULL) && 658 (mblk_len < stuff_len)) { 659 stuff_len -= mblk_len; 660 nmp = nmp->b_cont; 661 } 662 ASSERT(nmp); 663 up = (uint16_t *)(nmp->b_rptr + stuff_len); 664 665 *up = cksum; 666 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 667 NXGE_DEBUG_MSG((NULL, TX_CTL, 668 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 669 "use sw cksum " 670 "write to $%p cksum 0x%x content up 0x%x", 671 stuff_len, 672 up, 673 cksum, 674 *up)); 675 } else { 676 /* Hardware will compute the full checksum */ 677 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 678 hdrp->value |= 679 (((uint64_t)(start_offset >> 1)) << 680 TX_PKT_HEADER_L4START_SHIFT); 681 hdrp->value |= 682 (((uint64_t)(stuff_offset >> 1)) << 683 TX_PKT_HEADER_L4STUFF_SHIFT); 684 685 NXGE_DEBUG_MSG((NULL, TX_CTL, 686 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 687 " use partial checksum " 688 "cksum 0x%x ", 689 "value 0x%llx", 690 stuff_offset, 691 IP_CSUM(mp, start_offset, 0), 692 hdrp->value)); 693 } 694 } 695 696 NXGE_DEBUG_MSG((NULL, TX_CTL, 697 "==> nxge_tx_pkt_hdr_init: UDP" 698 "value 0x%llx", hdrp->value)); 699 break; 700 701 default: 702 goto fill_tx_header_done; 703 } 704 705 fill_tx_header_done: 706 NXGE_DEBUG_MSG((NULL, TX_CTL, 707 "==> nxge_fill_tx_hdr: pkt_len %d " 708 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 709 710 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 711 } 712 713 /*ARGSUSED*/ 714 p_mblk_t 715 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 716 { 717 p_mblk_t newmp = NULL; 718 719 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 720 NXGE_DEBUG_MSG((NULL, TX_CTL, 721 "<== nxge_tx_pkt_header_reserve: allocb failed")); 722 return (NULL); 723 } 724 725 NXGE_DEBUG_MSG((NULL, TX_CTL, 726 "==> nxge_tx_pkt_header_reserve: get new mp")); 727 DB_TYPE(newmp) = M_DATA; 728 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 729 linkb(newmp, mp); 730 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 731 732 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 733 "b_rptr $%p b_wptr $%p", 734 newmp->b_rptr, newmp->b_wptr)); 735 736 NXGE_DEBUG_MSG((NULL, TX_CTL, 737 "<== nxge_tx_pkt_header_reserve: use new mp")); 738 739 return (newmp); 740 } 741 742 int 743 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 744 { 745 uint_t nmblks; 746 ssize_t len; 747 uint_t pkt_len; 748 p_mblk_t nmp, bmp, tmp; 749 uint8_t *b_wptr; 750 751 NXGE_DEBUG_MSG((NULL, TX_CTL, 752 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 753 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 754 755 nmp = mp; 756 bmp = mp; 757 nmblks = 0; 758 pkt_len = 0; 759 *tot_xfer_len_p = 0; 760 761 while (nmp) { 762 len = MBLKL(nmp); 763 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 764 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 765 len, pkt_len, nmblks, 766 *tot_xfer_len_p)); 767 768 if (len <= 0) { 769 bmp = nmp; 770 nmp = nmp->b_cont; 771 NXGE_DEBUG_MSG((NULL, TX_CTL, 772 "==> nxge_tx_pkt_nmblocks: " 773 "len (0) pkt_len %d nmblks %d", 774 pkt_len, nmblks)); 775 continue; 776 } 777 778 *tot_xfer_len_p += len; 779 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 780 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 781 len, pkt_len, nmblks, 782 *tot_xfer_len_p)); 783 784 if (len < nxge_bcopy_thresh) { 785 NXGE_DEBUG_MSG((NULL, TX_CTL, 786 "==> nxge_tx_pkt_nmblocks: " 787 "len %d (< thresh) pkt_len %d nmblks %d", 788 len, pkt_len, nmblks)); 789 if (pkt_len == 0) 790 nmblks++; 791 
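			/*
			 * A fragment below nxge_bcopy_thresh will be
			 * copied into a pre-mapped transmit buffer by
			 * the send path rather than DMA-bound, so a
			 * run of consecutive small fragments shares a
			 * single gather pointer; only the first of the
			 * run bumps nmblks above.
			 */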
pkt_len += len; 792 if (pkt_len >= nxge_bcopy_thresh) { 793 pkt_len = 0; 794 len = 0; 795 nmp = bmp; 796 } 797 } else { 798 NXGE_DEBUG_MSG((NULL, TX_CTL, 799 "==> nxge_tx_pkt_nmblocks: " 800 "len %d (> thresh) pkt_len %d nmblks %d", 801 len, pkt_len, nmblks)); 802 pkt_len = 0; 803 nmblks++; 804 /* 805 * Hardware limits the transfer length to 4K. 806 * If len is more than 4K, we need to break 807 * it up to at most 2 more blocks. 808 */ 809 if (len > TX_MAX_TRANSFER_LENGTH) { 810 uint32_t nsegs; 811 812 nsegs = 1; 813 NXGE_DEBUG_MSG((NULL, TX_CTL, 814 "==> nxge_tx_pkt_nmblocks: " 815 "len %d pkt_len %d nmblks %d nsegs %d", 816 len, pkt_len, nmblks, nsegs)); 817 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 818 ++nsegs; 819 } 820 do { 821 b_wptr = nmp->b_rptr + 822 TX_MAX_TRANSFER_LENGTH; 823 nmp->b_wptr = b_wptr; 824 if ((tmp = dupb(nmp)) == NULL) { 825 return (0); 826 } 827 tmp->b_rptr = b_wptr; 828 tmp->b_wptr = nmp->b_wptr; 829 tmp->b_cont = nmp->b_cont; 830 nmp->b_cont = tmp; 831 nmblks++; 832 if (--nsegs) { 833 nmp = tmp; 834 } 835 } while (nsegs); 836 nmp = tmp; 837 } 838 } 839 840 /* 841 * Hardware limits the transmit gather pointers to 15. 842 */ 843 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 844 TX_MAX_GATHER_POINTERS) { 845 NXGE_DEBUG_MSG((NULL, TX_CTL, 846 "==> nxge_tx_pkt_nmblocks: pull msg - " 847 "len %d pkt_len %d nmblks %d", 848 len, pkt_len, nmblks)); 849 /* Pull all message blocks from b_cont */ 850 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 851 return (0); 852 } 853 freemsg(nmp->b_cont); 854 nmp->b_cont = tmp; 855 pkt_len = 0; 856 } 857 bmp = nmp; 858 nmp = nmp->b_cont; 859 } 860 861 NXGE_DEBUG_MSG((NULL, TX_CTL, 862 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 863 "nmblks %d len %d tot_xfer_len %d", 864 mp->b_rptr, mp->b_wptr, nmblks, 865 MBLKL(mp), *tot_xfer_len_p)); 866 867 return (nmblks); 868 } 869 870 boolean_t 871 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 872 { 873 boolean_t status = B_TRUE; 874 p_nxge_dma_common_t tx_desc_dma_p; 875 nxge_dma_common_t desc_area; 876 p_tx_desc_t tx_desc_ring_vp; 877 p_tx_desc_t tx_desc_p; 878 p_tx_desc_t tx_desc_pp; 879 tx_desc_t r_tx_desc; 880 p_tx_msg_t tx_msg_ring; 881 p_tx_msg_t tx_msg_p; 882 npi_handle_t handle; 883 tx_ring_hdl_t tx_head; 884 uint32_t pkt_len; 885 uint_t tx_rd_index; 886 uint16_t head_index, tail_index; 887 uint8_t tdc; 888 boolean_t head_wrap, tail_wrap; 889 p_nxge_tx_ring_stats_t tdc_stats; 890 int rc; 891 892 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 893 894 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 895 (nmblks != 0)); 896 NXGE_DEBUG_MSG((nxgep, TX_CTL, 897 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 898 tx_ring_p->descs_pending, nxge_reclaim_pending, 899 nmblks)); 900 if (!status) { 901 tx_desc_dma_p = &tx_ring_p->tdc_desc; 902 desc_area = tx_ring_p->tdc_desc; 903 handle = NXGE_DEV_NPI_HANDLE(nxgep); 904 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 905 tx_desc_ring_vp = 906 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 907 tx_rd_index = tx_ring_p->rd_index; 908 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 909 tx_msg_ring = tx_ring_p->tx_msg_ring; 910 tx_msg_p = &tx_msg_ring[tx_rd_index]; 911 tdc = tx_ring_p->tdc; 912 tdc_stats = tx_ring_p->tdc_stats; 913 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 914 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 915 } 916 917 tail_index = tx_ring_p->wr_index; 918 tail_wrap = tx_ring_p->wr_index_wrap; 919 920 NXGE_DEBUG_MSG((nxgep, TX_CTL, 921 "==> 
nxge_txdma_reclaim: tdc %d tx_rd_index %d " 922 "tail_index %d tail_wrap %d " 923 "tx_desc_p $%p ($%p) ", 924 tdc, tx_rd_index, tail_index, tail_wrap, 925 tx_desc_p, (*(uint64_t *)tx_desc_p))); 926 /* 927 * Read the hardware maintained transmit head 928 * and wrap around bit. 929 */ 930 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 931 head_index = tx_head.bits.ldw.head; 932 head_wrap = tx_head.bits.ldw.wrap; 933 NXGE_DEBUG_MSG((nxgep, TX_CTL, 934 "==> nxge_txdma_reclaim: " 935 "tx_rd_index %d tail %d tail_wrap %d " 936 "head %d wrap %d", 937 tx_rd_index, tail_index, tail_wrap, 938 head_index, head_wrap)); 939 940 if (head_index == tail_index) { 941 if (TXDMA_RING_EMPTY(head_index, head_wrap, 942 tail_index, tail_wrap) && 943 (head_index == tx_rd_index)) { 944 NXGE_DEBUG_MSG((nxgep, TX_CTL, 945 "==> nxge_txdma_reclaim: EMPTY")); 946 return (B_TRUE); 947 } 948 949 NXGE_DEBUG_MSG((nxgep, TX_CTL, 950 "==> nxge_txdma_reclaim: Checking " 951 "if ring full")); 952 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 953 tail_wrap)) { 954 NXGE_DEBUG_MSG((nxgep, TX_CTL, 955 "==> nxge_txdma_reclaim: full")); 956 return (B_FALSE); 957 } 958 } 959 960 NXGE_DEBUG_MSG((nxgep, TX_CTL, 961 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 962 963 tx_desc_pp = &r_tx_desc; 964 while ((tx_rd_index != head_index) && 965 (tx_ring_p->descs_pending != 0)) { 966 967 NXGE_DEBUG_MSG((nxgep, TX_CTL, 968 "==> nxge_txdma_reclaim: Checking if pending")); 969 970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 971 "==> nxge_txdma_reclaim: " 972 "descs_pending %d ", 973 tx_ring_p->descs_pending)); 974 975 NXGE_DEBUG_MSG((nxgep, TX_CTL, 976 "==> nxge_txdma_reclaim: " 977 "(tx_rd_index %d head_index %d " 978 "(tx_desc_p $%p)", 979 tx_rd_index, head_index, 980 tx_desc_p)); 981 982 tx_desc_pp->value = tx_desc_p->value; 983 NXGE_DEBUG_MSG((nxgep, TX_CTL, 984 "==> nxge_txdma_reclaim: " 985 "(tx_rd_index %d head_index %d " 986 "tx_desc_p $%p (desc value 0x%llx) ", 987 tx_rd_index, head_index, 988 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 989 990 NXGE_DEBUG_MSG((nxgep, TX_CTL, 991 "==> nxge_txdma_reclaim: dump desc:")); 992 993 pkt_len = tx_desc_pp->bits.hdw.tr_len; 994 tdc_stats->obytes += pkt_len; 995 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 996 NXGE_DEBUG_MSG((nxgep, TX_CTL, 997 "==> nxge_txdma_reclaim: pkt_len %d " 998 "tdc channel %d opackets %d", 999 pkt_len, 1000 tdc, 1001 tdc_stats->opackets)); 1002 1003 if (tx_msg_p->flags.dma_type == USE_DVMA) { 1004 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1005 "tx_desc_p = $%p " 1006 "tx_desc_pp = $%p " 1007 "index = %d", 1008 tx_desc_p, 1009 tx_desc_pp, 1010 tx_ring_p->rd_index)); 1011 (void) dvma_unload(tx_msg_p->dvma_handle, 1012 0, -1); 1013 tx_msg_p->dvma_handle = NULL; 1014 if (tx_ring_p->dvma_wr_index == 1015 tx_ring_p->dvma_wrap_mask) { 1016 tx_ring_p->dvma_wr_index = 0; 1017 } else { 1018 tx_ring_p->dvma_wr_index++; 1019 } 1020 tx_ring_p->dvma_pending--; 1021 } else if (tx_msg_p->flags.dma_type == 1022 USE_DMA) { 1023 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1024 "==> nxge_txdma_reclaim: " 1025 "USE DMA")); 1026 if (rc = ddi_dma_unbind_handle 1027 (tx_msg_p->dma_handle)) { 1028 cmn_err(CE_WARN, "!nxge_reclaim: " 1029 "ddi_dma_unbind_handle " 1030 "failed. status %d", rc); 1031 } 1032 } 1033 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1034 "==> nxge_txdma_reclaim: count packets")); 1035 /* 1036 * count a chained packet only once. 
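 * A packet that spans several descriptors has its mblk chain hung off
 * a single tx_msg entry, so the freemsg() below releases the whole
 * chain exactly once; the other descriptors of that packet carry a
 * NULL tx_message and are skipped by this check.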
1037 */ 1038 if (tx_msg_p->tx_message != NULL) { 1039 freemsg(tx_msg_p->tx_message); 1040 tx_msg_p->tx_message = NULL; 1041 } 1042 1043 tx_msg_p->flags.dma_type = USE_NONE; 1044 tx_rd_index = tx_ring_p->rd_index; 1045 tx_rd_index = (tx_rd_index + 1) & 1046 tx_ring_p->tx_wrap_mask; 1047 tx_ring_p->rd_index = tx_rd_index; 1048 tx_ring_p->descs_pending--; 1049 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 1050 tx_msg_p = &tx_msg_ring[tx_rd_index]; 1051 } 1052 1053 status = (nmblks <= (tx_ring_p->tx_ring_size - 1054 tx_ring_p->descs_pending - 1055 TX_FULL_MARK)); 1056 if (status) { 1057 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 1058 } 1059 } else { 1060 status = (nmblks <= 1061 (tx_ring_p->tx_ring_size - 1062 tx_ring_p->descs_pending - 1063 TX_FULL_MARK)); 1064 } 1065 1066 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1067 "<== nxge_txdma_reclaim status = 0x%08x", status)); 1068 1069 return (status); 1070 } 1071 1072 /* 1073 * nxge_tx_intr 1074 * 1075 * Process a TDC interrupt 1076 * 1077 * Arguments: 1078 * arg1 A Logical Device state Vector (LSV) data structure. 1079 * arg2 nxge_t * 1080 * 1081 * Notes: 1082 * 1083 * NPI/NXGE function calls: 1084 * npi_txdma_control_status() 1085 * npi_intr_ldg_mgmt_set() 1086 * 1087 * nxge_tx_err_evnts() 1088 * nxge_txdma_reclaim() 1089 * 1090 * Registers accessed: 1091 * TX_CS DMC+0x40028 Transmit Control And Status 1092 * PIO_LDSV 1093 * 1094 * Context: 1095 * Any domain 1096 */ 1097 uint_t 1098 nxge_tx_intr(void *arg1, void *arg2) 1099 { 1100 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1101 p_nxge_t nxgep = (p_nxge_t)arg2; 1102 p_nxge_ldg_t ldgp; 1103 uint8_t channel; 1104 uint32_t vindex; 1105 npi_handle_t handle; 1106 tx_cs_t cs; 1107 p_tx_ring_t *tx_rings; 1108 p_tx_ring_t tx_ring_p; 1109 npi_status_t rs = NPI_SUCCESS; 1110 uint_t serviced = DDI_INTR_UNCLAIMED; 1111 nxge_status_t status = NXGE_OK; 1112 1113 if (ldvp == NULL) { 1114 NXGE_DEBUG_MSG((NULL, INT_CTL, 1115 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 1116 nxgep, ldvp)); 1117 return (DDI_INTR_UNCLAIMED); 1118 } 1119 1120 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1121 nxgep = ldvp->nxgep; 1122 } 1123 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1124 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 1125 nxgep, ldvp)); 1126 1127 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1128 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1129 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1130 "<== nxge_tx_intr: interface not started or intialized")); 1131 return (DDI_INTR_CLAIMED); 1132 } 1133 1134 /* 1135 * This interrupt handler is for a specific 1136 * transmit dma channel. 1137 */ 1138 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1139 /* Get the control and status for this channel. 
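 * The mk (marked) bit in TX_CS means the engine has finished a
 * descriptor whose mark bit was set by the driver.  That is the cue to
 * reclaim completed descriptors under the ring lock and then call
 * mac_tx_update(), so that a mac layer that had been flow-controlled
 * because the ring looked full can start handing us packets again.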
*/ 1140 channel = ldvp->channel; 1141 ldgp = ldvp->ldgp; 1142 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1143 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 1144 "channel %d", 1145 nxgep, ldvp, channel)); 1146 1147 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 1148 vindex = ldvp->vdma_index; 1149 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1150 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 1151 channel, vindex, rs)); 1152 if (!rs && cs.bits.ldw.mk) { 1153 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1154 "==> nxge_tx_intr:channel %d ring index %d " 1155 "status 0x%08x (mk bit set)", 1156 channel, vindex, rs)); 1157 tx_rings = nxgep->tx_rings->rings; 1158 tx_ring_p = tx_rings[vindex]; 1159 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1160 "==> nxge_tx_intr:channel %d ring index %d " 1161 "status 0x%08x (mk bit set, calling reclaim)", 1162 channel, vindex, rs)); 1163 1164 MUTEX_ENTER(&tx_ring_p->lock); 1165 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 1166 MUTEX_EXIT(&tx_ring_p->lock); 1167 mac_tx_update(nxgep->mach); 1168 } 1169 1170 /* 1171 * Process other transmit control and status. 1172 * Check the ldv state. 1173 */ 1174 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1175 /* 1176 * Rearm this logical group if this is a single device 1177 * group. 1178 */ 1179 if (ldgp->nldvs == 1) { 1180 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1181 "==> nxge_tx_intr: rearm")); 1182 if (status == NXGE_OK) { 1183 if (isLDOMguest(nxgep)) { 1184 nxge_hio_ldgimgn(nxgep, ldgp); 1185 } else { 1186 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 1187 B_TRUE, ldgp->ldg_timer); 1188 } 1189 } 1190 } 1191 1192 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 1193 serviced = DDI_INTR_CLAIMED; 1194 return (serviced); 1195 } 1196 1197 void 1198 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 1199 { 1200 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 1201 1202 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1203 1204 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 1205 } 1206 1207 void 1208 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 1209 { 1210 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 1211 1212 (void) nxge_txdma_stop(nxgep); 1213 1214 (void) nxge_fixup_txdma_rings(nxgep); 1215 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1216 (void) nxge_tx_mac_enable(nxgep); 1217 (void) nxge_txdma_hw_kick(nxgep); 1218 1219 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 1220 } 1221 1222 npi_status_t 1223 nxge_txdma_channel_disable( 1224 nxge_t *nxge, 1225 int channel) 1226 { 1227 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1228 npi_status_t rs; 1229 tdmc_intr_dbg_t intr_dbg; 1230 1231 /* 1232 * Stop the dma channel and wait for the stop-done. 1233 * If the stop-done bit is not present, then force 1234 * an error so TXC will stop. 1235 * All channels bound to this port need to be stopped 1236 * and reset after injecting an interrupt error. 
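 * (Injecting a nack_pref error through TDMC_INTR_DBG forces the engine
 *  to fault and halt, which sets the stop-done state even when the
 *  ordinary stop request was ignored; the second disable call below
 *  then completes normally.)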
1237 */ 1238 rs = npi_txdma_channel_disable(handle, channel); 1239 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1240 "==> nxge_txdma_channel_disable(%d) " 1241 "rs 0x%x", channel, rs)); 1242 if (rs != NPI_SUCCESS) { 1243 /* Inject any error */ 1244 intr_dbg.value = 0; 1245 intr_dbg.bits.ldw.nack_pref = 1; 1246 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1247 "==> nxge_txdma_hw_mode: " 1248 "channel %d (stop failed 0x%x) " 1249 "(inject err)", rs, channel)); 1250 (void) npi_txdma_inj_int_error_set( 1251 handle, channel, &intr_dbg); 1252 rs = npi_txdma_channel_disable(handle, channel); 1253 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1254 "==> nxge_txdma_hw_mode: " 1255 "channel %d (stop again 0x%x) " 1256 "(after inject err)", 1257 rs, channel)); 1258 } 1259 1260 return (rs); 1261 } 1262 1263 /* 1264 * nxge_txdma_hw_mode 1265 * 1266 * Toggle all TDCs on (enable) or off (disable). 1267 * 1268 * Arguments: 1269 * nxgep 1270 * enable Enable or disable a TDC. 1271 * 1272 * Notes: 1273 * 1274 * NPI/NXGE function calls: 1275 * npi_txdma_channel_enable(TX_CS) 1276 * npi_txdma_channel_disable(TX_CS) 1277 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1278 * 1279 * Registers accessed: 1280 * TX_CS DMC+0x40028 Transmit Control And Status 1281 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1282 * 1283 * Context: 1284 * Any domain 1285 */ 1286 nxge_status_t 1287 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1288 { 1289 nxge_grp_set_t *set = &nxgep->tx_set; 1290 1291 npi_handle_t handle; 1292 nxge_status_t status; 1293 npi_status_t rs; 1294 int tdc; 1295 1296 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1297 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1298 1299 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1300 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1301 "<== nxge_txdma_mode: not initialized")); 1302 return (NXGE_ERROR); 1303 } 1304 1305 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1306 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1307 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1308 return (NXGE_ERROR); 1309 } 1310 1311 /* Enable or disable all of the TDCs owned by us. */ 1312 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1313 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1314 if ((1 << tdc) & set->owned.map) { 1315 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1316 if (ring) { 1317 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1318 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1319 if (enable) { 1320 rs = npi_txdma_channel_enable 1321 (handle, tdc); 1322 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1323 "==> nxge_txdma_hw_mode: " 1324 "channel %d (enable) rs 0x%x", 1325 tdc, rs)); 1326 } else { 1327 rs = nxge_txdma_channel_disable 1328 (nxgep, tdc); 1329 } 1330 } 1331 } 1332 } 1333 1334 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1335 1336 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1337 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1338 1339 return (status); 1340 } 1341 1342 void 1343 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1344 { 1345 npi_handle_t handle; 1346 1347 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1348 "==> nxge_txdma_enable_channel: channel %d", channel)); 1349 1350 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1351 /* enable the transmit dma channels */ 1352 (void) npi_txdma_channel_enable(handle, channel); 1353 1354 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1355 } 1356 1357 void 1358 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1359 { 1360 npi_handle_t handle; 1361 1362 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1363 "==> nxge_txdma_disable_channel: channel %d", channel)); 1364 1365 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1366 /* stop the transmit dma channels */ 1367 (void) npi_txdma_channel_disable(handle, channel); 1368 1369 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1370 } 1371 1372 /* 1373 * nxge_txdma_stop_inj_err 1374 * 1375 * Stop a TDC. If at first we don't succeed, inject an error. 1376 * 1377 * Arguments: 1378 * nxgep 1379 * channel The channel to stop. 1380 * 1381 * Notes: 1382 * 1383 * NPI/NXGE function calls: 1384 * npi_txdma_channel_disable() 1385 * npi_txdma_inj_int_error_set() 1386 * #if defined(NXGE_DEBUG) 1387 * nxge_txdma_regs_dump_channels(nxgep); 1388 * #endif 1389 * 1390 * Registers accessed: 1391 * TX_CS DMC+0x40028 Transmit Control And Status 1392 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1393 * 1394 * Context: 1395 * Any domain 1396 */ 1397 int 1398 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1399 { 1400 npi_handle_t handle; 1401 tdmc_intr_dbg_t intr_dbg; 1402 int status; 1403 npi_status_t rs = NPI_SUCCESS; 1404 1405 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1406 /* 1407 * Stop the dma channel waits for the stop done. 1408 * If the stop done bit is not set, then create 1409 * an error. 1410 */ 1411 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1412 rs = npi_txdma_channel_disable(handle, channel); 1413 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1414 if (status == NXGE_OK) { 1415 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1416 "<== nxge_txdma_stop_inj_err (channel %d): " 1417 "stopped OK", channel)); 1418 return (status); 1419 } 1420 1421 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1422 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1423 "injecting error", channel, rs)); 1424 /* Inject any error */ 1425 intr_dbg.value = 0; 1426 intr_dbg.bits.ldw.nack_pref = 1; 1427 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1428 1429 /* Stop done bit will be set as a result of error injection */ 1430 rs = npi_txdma_channel_disable(handle, channel); 1431 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1432 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1433 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1434 "<== nxge_txdma_stop_inj_err (channel %d): " 1435 "stopped OK ", channel)); 1436 return (status); 1437 } 1438 1439 #if defined(NXGE_DEBUG) 1440 nxge_txdma_regs_dump_channels(nxgep); 1441 #endif 1442 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1443 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1444 " (injected error but still not stopped)", channel, rs)); 1445 1446 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1447 return (status); 1448 } 1449 1450 /*ARGSUSED*/ 1451 void 1452 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1453 { 1454 nxge_grp_set_t *set = &nxgep->tx_set; 1455 int tdc; 1456 1457 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1458 1459 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1460 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1461 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 1462 return; 1463 } 1464 1465 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1466 if ((1 << tdc) & set->owned.map) { 1467 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1468 if (ring) { 1469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1470 "==> nxge_fixup_txdma_rings: channel %d", 1471 tdc)); 1472 nxge_txdma_fixup_channel(nxgep, ring, tdc); 1473 } 1474 } 1475 } 1476 1477 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1478 } 1479 1480 /*ARGSUSED*/ 1481 void 1482 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1483 { 1484 p_tx_ring_t ring_p; 1485 1486 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1487 ring_p = nxge_txdma_get_ring(nxgep, channel); 1488 if (ring_p == NULL) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1490 return; 1491 } 1492 1493 if (ring_p->tdc != channel) { 1494 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1495 "<== nxge_txdma_fix_channel: channel not matched " 1496 "ring tdc %d passed channel", 1497 ring_p->tdc, channel)); 1498 return; 1499 } 1500 1501 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1502 1503 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1504 } 1505 1506 /*ARGSUSED*/ 1507 void 1508 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1509 { 1510 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1511 1512 if (ring_p == NULL) { 1513 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1514 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1515 return; 1516 } 1517 1518 if (ring_p->tdc != channel) { 1519 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1520 "<== nxge_txdma_fixup_channel: channel not matched " 1521 "ring tdc %d passed channel", 1522 ring_p->tdc, channel)); 1523 return; 1524 } 1525 1526 MUTEX_ENTER(&ring_p->lock); 1527 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1528 ring_p->rd_index = 0; 1529 ring_p->wr_index = 0; 1530 ring_p->ring_head.value = 0; 1531 ring_p->ring_kick_tail.value = 0; 1532 ring_p->descs_pending = 0; 1533 MUTEX_EXIT(&ring_p->lock); 1534 1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1536 } 1537 1538 /*ARGSUSED*/ 1539 void 1540 nxge_txdma_hw_kick(p_nxge_t nxgep) 1541 { 1542 nxge_grp_set_t *set = &nxgep->tx_set; 1543 int tdc; 1544 1545 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1546 1547 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1548 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1549 "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 1550 return; 1551 } 1552 1553 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1554 if ((1 << tdc) & set->owned.map) { 1555 tx_ring_t *ring = 
nxgep->tx_rings->rings[tdc]; 1556 if (ring) { 1557 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1558 "==> nxge_txdma_hw_kick: channel %d", tdc)); 1559 nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1560 } 1561 } 1562 } 1563 1564 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1565 } 1566 1567 /*ARGSUSED*/ 1568 void 1569 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1570 { 1571 p_tx_ring_t ring_p; 1572 1573 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1574 1575 ring_p = nxge_txdma_get_ring(nxgep, channel); 1576 if (ring_p == NULL) { 1577 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1578 " nxge_txdma_kick_channel")); 1579 return; 1580 } 1581 1582 if (ring_p->tdc != channel) { 1583 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1584 "<== nxge_txdma_kick_channel: channel not matched " 1585 "ring tdc %d passed channel", 1586 ring_p->tdc, channel)); 1587 return; 1588 } 1589 1590 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1591 1592 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1593 } 1594 1595 /*ARGSUSED*/ 1596 void 1597 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1598 { 1599 1600 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1601 1602 if (ring_p == NULL) { 1603 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1604 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1605 return; 1606 } 1607 1608 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1609 } 1610 1611 /* 1612 * nxge_check_tx_hang 1613 * 1614 * Check the state of all TDCs belonging to nxgep. 1615 * 1616 * Arguments: 1617 * nxgep 1618 * 1619 * Notes: 1620 * Called by nxge_hw.c:nxge_check_hw_state(). 1621 * 1622 * NPI/NXGE function calls: 1623 * 1624 * Registers accessed: 1625 * 1626 * Context: 1627 * Any domain 1628 */ 1629 /*ARGSUSED*/ 1630 void 1631 nxge_check_tx_hang(p_nxge_t nxgep) 1632 { 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1634 1635 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1636 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1637 goto nxge_check_tx_hang_exit; 1638 } 1639 1640 /* 1641 * Needs inputs from hardware for regs: 1642 * head index had not moved since last timeout. 1643 * packets not transmitted or stuffed registers. 1644 */ 1645 if (nxge_txdma_hung(nxgep)) { 1646 nxge_fixup_hung_txdma_rings(nxgep); 1647 } 1648 1649 nxge_check_tx_hang_exit: 1650 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1651 } 1652 1653 /* 1654 * nxge_txdma_hung 1655 * 1656 * Reset a TDC. 1657 * 1658 * Arguments: 1659 * nxgep 1660 * channel The channel to reset. 1661 * reg_data The current TX_CS. 1662 * 1663 * Notes: 1664 * Called by nxge_check_tx_hang() 1665 * 1666 * NPI/NXGE function calls: 1667 * nxge_txdma_channel_hung() 1668 * 1669 * Registers accessed: 1670 * 1671 * Context: 1672 * Any domain 1673 */ 1674 int 1675 nxge_txdma_hung(p_nxge_t nxgep) 1676 { 1677 nxge_grp_set_t *set = &nxgep->tx_set; 1678 int tdc; 1679 boolean_t shared; 1680 1681 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1682 1683 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1684 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1685 "<== nxge_txdma_hung: NULL ring pointer(s)")); 1686 return (B_FALSE); 1687 } 1688 1689 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1690 /* 1691 * Grab the shared state of the TDC. 
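 * In a service domain a TDC may be loaned to a guest via Hybrid I/O;
 * tdc_is_shared[] is read under the HIO lock.  A shared channel is
 * being driven by the guest, so its head/tail state is not ours to
 * judge and the hang check below simply skips it.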
1692 */ 1693 if (isLDOMservice(nxgep)) { 1694 nxge_hio_data_t *nhd = 1695 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio; 1696 1697 MUTEX_ENTER(&nhd->lock); 1698 shared = nxgep->tdc_is_shared[tdc]; 1699 MUTEX_EXIT(&nhd->lock); 1700 } else { 1701 shared = B_FALSE; 1702 } 1703 1704 /* 1705 * Now, process continue to process. 1706 */ 1707 if (((1 << tdc) & set->owned.map) && !shared) { 1708 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1709 if (ring) { 1710 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 1711 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1712 "==> nxge_txdma_hung: TDC %d hung", 1713 tdc)); 1714 return (B_TRUE); 1715 } 1716 } 1717 } 1718 } 1719 1720 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1721 1722 return (B_FALSE); 1723 } 1724 1725 /* 1726 * nxge_txdma_channel_hung 1727 * 1728 * Reset a TDC. 1729 * 1730 * Arguments: 1731 * nxgep 1732 * ring <channel>'s ring. 1733 * channel The channel to reset. 1734 * 1735 * Notes: 1736 * Called by nxge_txdma.c:nxge_txdma_hung() 1737 * 1738 * NPI/NXGE function calls: 1739 * npi_txdma_ring_head_get() 1740 * 1741 * Registers accessed: 1742 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1743 * 1744 * Context: 1745 * Any domain 1746 */ 1747 int 1748 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1749 { 1750 uint16_t head_index, tail_index; 1751 boolean_t head_wrap, tail_wrap; 1752 npi_handle_t handle; 1753 tx_ring_hdl_t tx_head; 1754 uint_t tx_rd_index; 1755 1756 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1757 1758 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1759 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1760 "==> nxge_txdma_channel_hung: channel %d", channel)); 1761 MUTEX_ENTER(&tx_ring_p->lock); 1762 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1763 1764 tail_index = tx_ring_p->wr_index; 1765 tail_wrap = tx_ring_p->wr_index_wrap; 1766 tx_rd_index = tx_ring_p->rd_index; 1767 MUTEX_EXIT(&tx_ring_p->lock); 1768 1769 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1770 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1771 "tail_index %d tail_wrap %d ", 1772 channel, tx_rd_index, tail_index, tail_wrap)); 1773 /* 1774 * Read the hardware maintained transmit head 1775 * and wrap around bit. 1776 */ 1777 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1778 head_index = tx_head.bits.ldw.head; 1779 head_wrap = tx_head.bits.ldw.wrap; 1780 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1781 "==> nxge_txdma_channel_hung: " 1782 "tx_rd_index %d tail %d tail_wrap %d " 1783 "head %d wrap %d", 1784 tx_rd_index, tail_index, tail_wrap, 1785 head_index, head_wrap)); 1786 1787 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1788 tail_index, tail_wrap) && 1789 (head_index == tx_rd_index)) { 1790 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1791 "==> nxge_txdma_channel_hung: EMPTY")); 1792 return (B_FALSE); 1793 } 1794 1795 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1796 "==> nxge_txdma_channel_hung: Checking if ring full")); 1797 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1798 tail_wrap)) { 1799 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1800 "==> nxge_txdma_channel_hung: full")); 1801 return (B_TRUE); 1802 } 1803 1804 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1805 1806 return (B_FALSE); 1807 } 1808 1809 /* 1810 * nxge_fixup_hung_txdma_rings 1811 * 1812 * Disable a TDC. 1813 * 1814 * Arguments: 1815 * nxgep 1816 * channel The channel to reset. 1817 * reg_data The current TX_CS. 
1818 * 1819 * Notes: 1820 * Called by nxge_check_tx_hang() 1821 * 1822 * NPI/NXGE function calls: 1823 * npi_txdma_ring_head_get() 1824 * 1825 * Registers accessed: 1826 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1827 * 1828 * Context: 1829 * Any domain 1830 */ 1831 /*ARGSUSED*/ 1832 void 1833 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1834 { 1835 nxge_grp_set_t *set = &nxgep->tx_set; 1836 int tdc; 1837 1838 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1839 1840 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1841 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1842 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1843 return; 1844 } 1845 1846 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1847 if ((1 << tdc) & set->owned.map) { 1848 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1849 if (ring) { 1850 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1851 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1852 "==> nxge_fixup_hung_txdma_rings: TDC %d", 1853 tdc)); 1854 } 1855 } 1856 } 1857 1858 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1859 } 1860 1861 /* 1862 * nxge_txdma_fixup_hung_channel 1863 * 1864 * 'Fix' a hung TDC. 1865 * 1866 * Arguments: 1867 * nxgep 1868 * channel The channel to fix. 1869 * 1870 * Notes: 1871 * Called by nxge_fixup_hung_txdma_rings() 1872 * 1873 * 1. Reclaim the TDC. 1874 * 2. Disable the TDC. 1875 * 1876 * NPI/NXGE function calls: 1877 * nxge_txdma_reclaim() 1878 * npi_txdma_channel_disable(TX_CS) 1879 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1880 * 1881 * Registers accessed: 1882 * TX_CS DMC+0x40028 Transmit Control And Status 1883 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1884 * 1885 * Context: 1886 * Any domain 1887 */ 1888 /*ARGSUSED*/ 1889 void 1890 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1891 { 1892 p_tx_ring_t ring_p; 1893 1894 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1895 ring_p = nxge_txdma_get_ring(nxgep, channel); 1896 if (ring_p == NULL) { 1897 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1898 "<== nxge_txdma_fix_hung_channel")); 1899 return; 1900 } 1901 1902 if (ring_p->tdc != channel) { 1903 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1904 "<== nxge_txdma_fix_hung_channel: channel not matched " 1905 "ring tdc %d passed channel", 1906 ring_p->tdc, channel)); 1907 return; 1908 } 1909 1910 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1911 1912 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1913 } 1914 1915 /*ARGSUSED*/ 1916 void 1917 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1918 uint16_t channel) 1919 { 1920 npi_handle_t handle; 1921 tdmc_intr_dbg_t intr_dbg; 1922 int status = NXGE_OK; 1923 1924 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1925 1926 if (ring_p == NULL) { 1927 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1928 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1929 return; 1930 } 1931 1932 if (ring_p->tdc != channel) { 1933 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1934 "<== nxge_txdma_fixup_hung_channel: channel " 1935 "not matched " 1936 "ring tdc %d passed channel", 1937 ring_p->tdc, channel)); 1938 return; 1939 } 1940 1941 /* Reclaim descriptors */ 1942 MUTEX_ENTER(&ring_p->lock); 1943 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1944 MUTEX_EXIT(&ring_p->lock); 1945 1946 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1947 /* 1948 * Stop the dma channel waits for the stop done. 1949 * If the stop done bit is not set, then force 1950 * an error. 
1951 */ 1952 status = npi_txdma_channel_disable(handle, channel); 1953 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1954 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1955 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1956 "ring tdc %d passed channel %d", 1957 ring_p->tdc, channel)); 1958 return; 1959 } 1960 1961 /* Inject any error */ 1962 intr_dbg.value = 0; 1963 intr_dbg.bits.ldw.nack_pref = 1; 1964 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1965 1966 /* Stop done bit will be set as a result of error injection */ 1967 status = npi_txdma_channel_disable(handle, channel); 1968 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1969 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1970 "<== nxge_txdma_fixup_hung_channel: stopped again" 1971 "ring tdc %d passed channel", 1972 ring_p->tdc, channel)); 1973 return; 1974 } 1975 1976 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1977 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 1978 "ring tdc %d passed channel", 1979 ring_p->tdc, channel)); 1980 1981 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1982 } 1983 1984 /*ARGSUSED*/ 1985 void 1986 nxge_reclaim_rings(p_nxge_t nxgep) 1987 { 1988 nxge_grp_set_t *set = &nxgep->tx_set; 1989 int tdc; 1990 1991 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 1992 1993 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1995 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1996 return; 1997 } 1998 1999 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2000 if ((1 << tdc) & set->owned.map) { 2001 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2002 if (ring) { 2003 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2004 "==> nxge_reclaim_rings: TDC %d", tdc)); 2005 MUTEX_ENTER(&ring->lock); 2006 (void) nxge_txdma_reclaim(nxgep, ring, tdc); 2007 MUTEX_EXIT(&ring->lock); 2008 } 2009 } 2010 } 2011 2012 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 2013 } 2014 2015 void 2016 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 2017 { 2018 nxge_grp_set_t *set = &nxgep->tx_set; 2019 npi_handle_t handle; 2020 int tdc; 2021 2022 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 2023 2024 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2025 2026 if (!isLDOMguest(nxgep)) { 2027 (void) npi_txdma_dump_fzc_regs(handle); 2028 2029 /* Dump TXC registers. */ 2030 (void) npi_txc_dump_fzc_regs(handle); 2031 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2032 } 2033 2034 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2035 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2036 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2037 return; 2038 } 2039 2040 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2041 if ((1 << tdc) & set->owned.map) { 2042 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2043 if (ring) { 2044 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2045 "==> nxge_txdma_regs_dump_channels: " 2046 "TDC %d", tdc)); 2047 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2048 2049 /* Dump TXC registers, if able to. 
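 * "If able to" means only when this instance is not a guest domain:
 * the FZC (shared) register space is owned by the service domain, so
 * a guest skips those dumps and prints only its own per-channel state.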
*/ 2050 if (!isLDOMguest(nxgep)) { 2051 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2052 "==> nxge_txdma_regs_dump_channels:" 2053 " FZC TDC %d", tdc)); 2054 (void) npi_txc_dump_tdc_fzc_regs 2055 (handle, tdc); 2056 } 2057 nxge_txdma_regs_dump(nxgep, tdc); 2058 } 2059 } 2060 } 2061 2062 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 2063 } 2064 2065 void 2066 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 2067 { 2068 npi_handle_t handle; 2069 tx_ring_hdl_t hdl; 2070 tx_ring_kick_t kick; 2071 tx_cs_t cs; 2072 txc_control_t control; 2073 uint32_t bitmap = 0; 2074 uint32_t burst = 0; 2075 uint32_t bytes = 0; 2076 dma_log_page_t cfg; 2077 2078 printf("\n\tfunc # %d tdc %d ", 2079 nxgep->function_num, channel); 2080 cfg.page_num = 0; 2081 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2082 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2083 printf("\n\tlog page func %d valid page 0 %d", 2084 cfg.func_num, cfg.valid); 2085 cfg.page_num = 1; 2086 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2087 printf("\n\tlog page func %d valid page 1 %d", 2088 cfg.func_num, cfg.valid); 2089 2090 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 2091 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 2092 printf("\n\thead value is 0x%0llx", 2093 (long long)hdl.value); 2094 printf("\n\thead index %d", hdl.bits.ldw.head); 2095 printf("\n\tkick value is 0x%0llx", 2096 (long long)kick.value); 2097 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 2098 2099 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 2100 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 2101 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 2102 2103 (void) npi_txc_control(handle, OP_GET, &control); 2104 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 2105 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 2106 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 2107 2108 printf("\n\tTXC port control 0x%0llx", 2109 (long long)control.value); 2110 printf("\n\tTXC port bitmap 0x%x", bitmap); 2111 printf("\n\tTXC max burst %d", burst); 2112 printf("\n\tTXC bytes xmt %d\n", bytes); 2113 2114 { 2115 ipp_status_t status; 2116 2117 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 2118 #if defined(__i386) 2119 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value); 2120 #else 2121 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value); 2122 #endif 2123 } 2124 } 2125 2126 /* 2127 * nxge_tdc_hvio_setup 2128 * 2129 * Record the physical (IO) addresses and sizes of the channel's transmit buffer area and of its control (descriptor ring and mailbox) area in the tx_ring_t, so that they can later be registered with the hypervisor. 2130 * 2131 * Arguments: 2132 * nxgep 2133 * channel The channel to map. 2134 * 2135 * Notes: 2136 * 2137 * NPI/NXGE function calls: 2138 * na 2139 * 2140 * Context: 2141 * Service domain only.
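 * (A guest domain goes through nxge_tdc_lp_conf() instead; see the
 *  sun4v code in nxge_map_txdma() below.)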
2142 */ 2143 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2144 static void 2145 nxge_tdc_hvio_setup( 2146 nxge_t *nxgep, int channel) 2147 { 2148 nxge_dma_common_t *data; 2149 nxge_dma_common_t *control; 2150 tx_ring_t *ring; 2151 2152 ring = nxgep->tx_rings->rings[channel]; 2153 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2154 2155 ring->hv_set = B_FALSE; 2156 2157 ring->hv_tx_buf_base_ioaddr_pp = 2158 (uint64_t)data->orig_ioaddr_pp; 2159 ring->hv_tx_buf_ioaddr_size = 2160 (uint64_t)data->orig_alength; 2161 2162 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2163 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2164 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2165 ring->hv_tx_buf_base_ioaddr_pp, 2166 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2167 data->ioaddr_pp, data->orig_vatopa, 2168 data->orig_alength, data->orig_alength)); 2169 2170 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2171 2172 ring->hv_tx_cntl_base_ioaddr_pp = 2173 (uint64_t)control->orig_ioaddr_pp; 2174 ring->hv_tx_cntl_ioaddr_size = 2175 (uint64_t)control->orig_alength; 2176 2177 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2178 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2179 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2180 ring->hv_tx_cntl_base_ioaddr_pp, 2181 control->orig_ioaddr_pp, control->orig_vatopa, 2182 ring->hv_tx_cntl_ioaddr_size, 2183 control->orig_alength, control->orig_alength)); 2184 } 2185 #endif 2186 2187 static nxge_status_t 2188 nxge_map_txdma(p_nxge_t nxgep, int channel) 2189 { 2190 nxge_dma_common_t **pData; 2191 nxge_dma_common_t **pControl; 2192 tx_ring_t **pRing, *ring; 2193 tx_mbox_t **mailbox; 2194 uint32_t num_chunks; 2195 2196 nxge_status_t status = NXGE_OK; 2197 2198 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2199 2200 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2201 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2202 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2203 "<== nxge_map_txdma: buf not allocated")); 2204 return (NXGE_ERROR); 2205 } 2206 } 2207 2208 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2209 return (NXGE_ERROR); 2210 2211 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2212 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2213 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2214 pRing = &nxgep->tx_rings->rings[channel]; 2215 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2216 2217 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2218 "tx_rings $%p tx_desc_rings $%p", 2219 nxgep->tx_rings, nxgep->tx_rings->rings)); 2220 2221 /* 2222 * Map descriptors from the buffer pools for <channel>. 2223 */ 2224 2225 /* 2226 * Set up and prepare buffer blocks, descriptors 2227 * and mailbox. 
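 *
 * A sketch of how the call below fans out (both helpers are defined
 * later in this file):
 *
 *	nxge_map_txdma_channel(nxgep, channel, ...)
 *	    nxge_map_txdma_channel_buf_ring()	buffer blocks, tx_msg ring
 *	    nxge_map_txdma_channel_cfg_ring()	descriptor ring and mailbox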
2228 */ 2229 status = nxge_map_txdma_channel(nxgep, channel, 2230 pData, pRing, num_chunks, pControl, mailbox); 2231 if (status != NXGE_OK) { 2232 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2233 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2234 "returned 0x%x", 2235 nxgep, channel, status)); 2236 return (status); 2237 } 2238 2239 ring = *pRing; 2240 2241 ring->index = (uint16_t)channel; 2242 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2243 2244 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2245 if (isLDOMguest(nxgep)) { 2246 (void) nxge_tdc_lp_conf(nxgep, channel); 2247 } else { 2248 nxge_tdc_hvio_setup(nxgep, channel); 2249 } 2250 #endif 2251 2252 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2253 "(status 0x%x channel %d)", status, channel)); 2254 2255 return (status); 2256 } 2257 2258 static nxge_status_t 2259 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2260 p_nxge_dma_common_t *dma_buf_p, 2261 p_tx_ring_t *tx_desc_p, 2262 uint32_t num_chunks, 2263 p_nxge_dma_common_t *dma_cntl_p, 2264 p_tx_mbox_t *tx_mbox_p) 2265 { 2266 int status = NXGE_OK; 2267 2268 /* 2269 * Set up and prepare buffer blocks, descriptors 2270 * and mailbox. 2271 */ 2272 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2273 "==> nxge_map_txdma_channel (channel %d)", channel)); 2274 /* 2275 * Transmit buffer blocks 2276 */ 2277 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2278 dma_buf_p, tx_desc_p, num_chunks); 2279 if (status != NXGE_OK) { 2280 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2281 "==> nxge_map_txdma_channel (channel %d): " 2282 "map buffer failed 0x%x", channel, status)); 2283 goto nxge_map_txdma_channel_exit; 2284 } 2285 2286 /* 2287 * Transmit block ring, and mailbox. 2288 */ 2289 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2290 tx_mbox_p); 2291 2292 goto nxge_map_txdma_channel_exit; 2293 2294 nxge_map_txdma_channel_fail1: 2295 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2296 "==> nxge_map_txdma_channel: unmap buf" 2297 "(status 0x%x channel %d)", 2298 status, channel)); 2299 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2300 2301 nxge_map_txdma_channel_exit: 2302 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2303 "<== nxge_map_txdma_channel: " 2304 "(status 0x%x channel %d)", 2305 status, channel)); 2306 2307 return (status); 2308 } 2309 2310 /*ARGSUSED*/ 2311 static void 2312 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2313 { 2314 tx_ring_t *ring; 2315 tx_mbox_t *mailbox; 2316 2317 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2318 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2319 /* 2320 * unmap tx block ring, and mailbox. 2321 */ 2322 ring = nxgep->tx_rings->rings[channel]; 2323 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2324 2325 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2326 2327 /* unmap buffer blocks */ 2328 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2329 2330 nxge_free_txb(nxgep, channel); 2331 2332 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2333 } 2334 2335 /* 2336 * nxge_map_txdma_channel_cfg_ring 2337 * 2338 * Map a TDC into our kernel space. 2339 * This function allocates all of the per-channel data structures. 2340 * 2341 * Arguments: 2342 * nxgep 2343 * dma_channel The channel to map. 2344 * dma_cntl_p 2345 * tx_ring_p dma_channel's transmit ring 2346 * tx_mbox_p dma_channel's mailbox 2347 * 2348 * Notes: 2349 * 2350 * NPI/NXGE function calls: 2351 * nxge_setup_dma_common() 2352 * 2353 * Registers accessed: 2354 * none. 
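 *
 * The descriptor-ring configuration word is assembled in the body
 * (field names abbreviated here), apparently encoding the ring length
 * in units of 8 descriptors:
 *
 *	tx_desc_len = tx_ring_size >> 3;
 *	tx_ring_cfig.value =
 *	    (dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
 *	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);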
2355 * 2356 * Context: 2357 * Any domain 2358 */ 2359 /*ARGSUSED*/ 2360 static void 2361 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2362 p_nxge_dma_common_t *dma_cntl_p, 2363 p_tx_ring_t tx_ring_p, 2364 p_tx_mbox_t *tx_mbox_p) 2365 { 2366 p_tx_mbox_t mboxp; 2367 p_nxge_dma_common_t cntl_dmap; 2368 p_nxge_dma_common_t dmap; 2369 p_tx_rng_cfig_t tx_ring_cfig_p; 2370 p_tx_ring_kick_t tx_ring_kick_p; 2371 p_tx_cs_t tx_cs_p; 2372 p_tx_dma_ent_msk_t tx_evmask_p; 2373 p_txdma_mbh_t mboxh_p; 2374 p_txdma_mbl_t mboxl_p; 2375 uint64_t tx_desc_len; 2376 2377 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2378 "==> nxge_map_txdma_channel_cfg_ring")); 2379 2380 cntl_dmap = *dma_cntl_p; 2381 2382 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2383 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2384 sizeof (tx_desc_t)); 2385 /* 2386 * Zero out transmit ring descriptors. 2387 */ 2388 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2389 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2390 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2391 tx_cs_p = &(tx_ring_p->tx_cs); 2392 tx_evmask_p = &(tx_ring_p->tx_evmask); 2393 tx_ring_cfig_p->value = 0; 2394 tx_ring_kick_p->value = 0; 2395 tx_cs_p->value = 0; 2396 tx_evmask_p->value = 0; 2397 2398 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2399 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2400 dma_channel, 2401 dmap->dma_cookie.dmac_laddress)); 2402 2403 tx_ring_cfig_p->value = 0; 2404 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2405 tx_ring_cfig_p->value = 2406 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2407 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2408 2409 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2410 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2411 dma_channel, 2412 tx_ring_cfig_p->value)); 2413 2414 tx_cs_p->bits.ldw.rst = 1; 2415 2416 /* Map in mailbox */ 2417 mboxp = (p_tx_mbox_t) 2418 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2419 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2420 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2421 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2422 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2423 mboxh_p->value = mboxl_p->value = 0; 2424 2425 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2426 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2427 dmap->dma_cookie.dmac_laddress)); 2428 2429 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2430 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2431 2432 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2433 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2434 2435 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2436 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2437 dmap->dma_cookie.dmac_laddress)); 2438 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2439 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2440 "mbox $%p", 2441 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2442 tx_ring_p->page_valid.value = 0; 2443 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2444 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2445 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2446 tx_ring_p->page_hdl.value = 0; 2447 2448 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2449 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2450 2451 tx_ring_p->max_burst.value = 0; 2452 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2453 2454 *tx_mbox_p = mboxp; 2455 2456 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2457 "<== nxge_map_txdma_channel_cfg_ring")); 2458 } 2459 2460 
/*ARGSUSED*/ 2461 static void 2462 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2463 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2464 { 2465 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2466 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2467 tx_ring_p->tdc)); 2468 2469 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2470 2471 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2472 "<== nxge_unmap_txdma_channel_cfg_ring")); 2473 } 2474 2475 /* 2476 * nxge_map_txdma_channel_buf_ring 2477 * 2478 * 2479 * Arguments: 2480 * nxgep 2481 * channel The channel to map. 2482 * dma_buf_p 2483 * tx_desc_p channel's descriptor ring 2484 * num_chunks 2485 * 2486 * Notes: 2487 * 2488 * NPI/NXGE function calls: 2489 * nxge_setup_dma_common() 2490 * 2491 * Registers accessed: 2492 * none. 2493 * 2494 * Context: 2495 * Any domain 2496 */ 2497 static nxge_status_t 2498 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2499 p_nxge_dma_common_t *dma_buf_p, 2500 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2501 { 2502 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2503 p_nxge_dma_common_t dmap; 2504 nxge_os_dma_handle_t tx_buf_dma_handle; 2505 p_tx_ring_t tx_ring_p; 2506 p_tx_msg_t tx_msg_ring; 2507 nxge_status_t status = NXGE_OK; 2508 int ddi_status = DDI_SUCCESS; 2509 int i, j, index; 2510 uint32_t size, bsize; 2511 uint32_t nblocks, nmsgs; 2512 2513 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2514 "==> nxge_map_txdma_channel_buf_ring")); 2515 2516 dma_bufp = tmp_bufp = *dma_buf_p; 2517 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2518 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2519 "chunks bufp $%p", 2520 channel, num_chunks, dma_bufp)); 2521 2522 nmsgs = 0; 2523 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2524 nmsgs += tmp_bufp->nblocks; 2525 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2526 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2527 "bufp $%p nblocks %d nmsgs %d", 2528 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2529 } 2530 if (!nmsgs) { 2531 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2532 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2533 "no msg blocks", 2534 channel)); 2535 status = NXGE_ERROR; 2536 goto nxge_map_txdma_channel_buf_ring_exit; 2537 } 2538 2539 tx_ring_p = (p_tx_ring_t) 2540 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2541 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2542 (void *)nxgep->interrupt_cookie); 2543 2544 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2545 tx_ring_p->tx_ring_busy = B_FALSE; 2546 tx_ring_p->nxgep = nxgep; 2547 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2548 nxge_serial_tx, tx_ring_p); 2549 /* 2550 * Allocate transmit message rings and handles for packets 2551 * not to be copied to premapped buffers. 
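 * One tx_msg_t and one DDI DMA handle is allocated per descriptor
 * (nmsgs in all); the handles are used to DMA-bind large mblks
 * directly rather than bcopy them into the premapped buffers.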
2552 */ 2553 size = nmsgs * sizeof (tx_msg_t); 2554 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2555 for (i = 0; i < nmsgs; i++) { 2556 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2557 DDI_DMA_DONTWAIT, 0, 2558 &tx_msg_ring[i].dma_handle); 2559 if (ddi_status != DDI_SUCCESS) { 2560 status |= NXGE_DDI_FAILED; 2561 break; 2562 } 2563 } 2564 if (i < nmsgs) { 2565 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2566 "Allocate handles failed.")); 2567 goto nxge_map_txdma_channel_buf_ring_fail1; 2568 } 2569 2570 tx_ring_p->tdc = channel; 2571 tx_ring_p->tx_msg_ring = tx_msg_ring; 2572 tx_ring_p->tx_ring_size = nmsgs; 2573 tx_ring_p->num_chunks = num_chunks; 2574 if (!nxge_tx_intr_thres) { 2575 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2576 } 2577 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2578 tx_ring_p->rd_index = 0; 2579 tx_ring_p->wr_index = 0; 2580 tx_ring_p->ring_head.value = 0; 2581 tx_ring_p->ring_kick_tail.value = 0; 2582 tx_ring_p->descs_pending = 0; 2583 2584 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2585 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2586 "actual tx desc max %d nmsgs %d " 2587 "(config nxge_tx_ring_size %d)", 2588 channel, tx_ring_p->tx_ring_size, nmsgs, 2589 nxge_tx_ring_size)); 2590 2591 /* 2592 * Map in buffers from the buffer pool. 2593 */ 2594 index = 0; 2595 bsize = dma_bufp->block_size; 2596 2597 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2598 "dma_bufp $%p tx_rng_p $%p " 2599 "tx_msg_rng_p $%p bsize %d", 2600 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2601 2602 tx_buf_dma_handle = dma_bufp->dma_handle; 2603 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2604 bsize = dma_bufp->block_size; 2605 nblocks = dma_bufp->nblocks; 2606 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2607 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2608 "size %d dma_bufp $%p", 2609 i, sizeof (nxge_dma_common_t), dma_bufp)); 2610 2611 for (j = 0; j < nblocks; j++) { 2612 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2613 dmap = &tx_msg_ring[index++].buf_dma; 2614 #ifdef TX_MEM_DEBUG 2615 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2616 "==> nxge_map_txdma_channel_buf_ring: j %d" 2617 "dmap $%p", i, dmap)); 2618 #endif 2619 nxge_setup_dma_common(dmap, dma_bufp, 1, 2620 bsize); 2621 } 2622 } 2623 2624 if (i < num_chunks) { 2625 status = NXGE_ERROR; 2626 goto nxge_map_txdma_channel_buf_ring_fail1; 2627 } 2628 2629 *tx_desc_p = tx_ring_p; 2630 2631 goto nxge_map_txdma_channel_buf_ring_exit; 2632 2633 nxge_map_txdma_channel_buf_ring_fail1: 2634 if (tx_ring_p->serial) { 2635 nxge_serialize_destroy(tx_ring_p->serial); 2636 tx_ring_p->serial = NULL; 2637 } 2638 2639 index--; 2640 for (; index >= 0; index--) { 2641 if (tx_msg_ring[index].dma_handle != NULL) { 2642 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2643 } 2644 } 2645 MUTEX_DESTROY(&tx_ring_p->lock); 2646 KMEM_FREE(tx_msg_ring, size); 2647 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2648 2649 status = NXGE_ERROR; 2650 2651 nxge_map_txdma_channel_buf_ring_exit: 2652 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2653 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2654 2655 return (status); 2656 } 2657 2658 /*ARGSUSED*/ 2659 static void 2660 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2661 { 2662 p_tx_msg_t tx_msg_ring; 2663 p_tx_msg_t tx_msg_p; 2664 int i; 2665 2666 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2667 "==> nxge_unmap_txdma_channel_buf_ring")); 2668 if (tx_ring_p == NULL) { 2669 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2670 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2671 return; 2672 } 2673 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2674 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2675 tx_ring_p->tdc)); 2676 2677 tx_msg_ring = tx_ring_p->tx_msg_ring; 2678 2679 /* 2680 * Since the serialization thread, timer thread and 2681 * interrupt thread can all call the transmit reclaim, 2682 * the unmapping function needs to acquire the lock 2683 * to free those buffers which were transmitted 2684 * by the hardware already. 2685 */ 2686 MUTEX_ENTER(&tx_ring_p->lock); 2687 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2688 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2689 "channel %d", 2690 tx_ring_p->tdc)); 2691 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2692 2693 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2694 tx_msg_p = &tx_msg_ring[i]; 2695 if (tx_msg_p->tx_message != NULL) { 2696 freemsg(tx_msg_p->tx_message); 2697 tx_msg_p->tx_message = NULL; 2698 } 2699 } 2700 2701 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2702 if (tx_msg_ring[i].dma_handle != NULL) { 2703 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2704 } 2705 tx_msg_ring[i].dma_handle = NULL; 2706 } 2707 2708 MUTEX_EXIT(&tx_ring_p->lock); 2709 2710 if (tx_ring_p->serial) { 2711 nxge_serialize_destroy(tx_ring_p->serial); 2712 tx_ring_p->serial = NULL; 2713 } 2714 2715 MUTEX_DESTROY(&tx_ring_p->lock); 2716 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2717 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2718 2719 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2720 "<== nxge_unmap_txdma_channel_buf_ring")); 2721 } 2722 2723 static nxge_status_t 2724 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2725 { 2726 p_tx_rings_t tx_rings; 2727 p_tx_ring_t *tx_desc_rings; 2728 p_tx_mbox_areas_t tx_mbox_areas_p; 2729 p_tx_mbox_t *tx_mbox_p; 2730 nxge_status_t status = NXGE_OK; 2731 2732 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2733 2734 tx_rings = nxgep->tx_rings; 2735 if (tx_rings == NULL) { 2736 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2737 "<== nxge_txdma_hw_start: NULL ring pointer")); 2738 return (NXGE_ERROR); 2739 } 2740 tx_desc_rings = tx_rings->rings; 2741 if (tx_desc_rings == NULL) { 2742 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2743 "<== nxge_txdma_hw_start: NULL ring pointers")); 2744 return (NXGE_ERROR); 2745 } 2746 2747 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2748 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2749 2750 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2751 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2752 2753 status = nxge_txdma_start_channel(nxgep, channel, 2754 (p_tx_ring_t)tx_desc_rings[channel], 2755 (p_tx_mbox_t)tx_mbox_p[channel]); 2756 if (status != NXGE_OK) { 2757 goto nxge_txdma_hw_start_fail1; 2758 } 2759 2760 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2761 "tx_rings $%p rings $%p", 2762 nxgep->tx_rings, nxgep->tx_rings->rings)); 2763 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2764 "tx_rings $%p tx_desc_rings $%p", 2765 nxgep->tx_rings, tx_desc_rings)); 2766 2767 goto nxge_txdma_hw_start_exit; 2768 2769 nxge_txdma_hw_start_fail1: 2770 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2771 "==> nxge_txdma_hw_start: disable " 2772 "(status 0x%x channel %d)", status, channel)); 2773 2774 nxge_txdma_hw_start_exit: 2775 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2776 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2777 2778 return (status); 2779 } 2780 2781 /* 2782 * nxge_txdma_start_channel 2783 * 2784 * Start a TDC. 2785 * 2786 * Arguments: 2787 * nxgep 2788 * channel The channel to start. 
2789 * tx_ring_p channel's transmit descriptor ring. 2790 * tx_mbox_p channel' smailbox. 2791 * 2792 * Notes: 2793 * 2794 * NPI/NXGE function calls: 2795 * nxge_reset_txdma_channel() 2796 * nxge_init_txdma_channel_event_mask() 2797 * nxge_enable_txdma_channel() 2798 * 2799 * Registers accessed: 2800 * none directly (see functions above). 2801 * 2802 * Context: 2803 * Any domain 2804 */ 2805 static nxge_status_t 2806 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2807 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2808 2809 { 2810 nxge_status_t status = NXGE_OK; 2811 2812 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2813 "==> nxge_txdma_start_channel (channel %d)", channel)); 2814 /* 2815 * TXDMA/TXC must be in stopped state. 2816 */ 2817 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2818 2819 /* 2820 * Reset TXDMA channel 2821 */ 2822 tx_ring_p->tx_cs.value = 0; 2823 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2824 status = nxge_reset_txdma_channel(nxgep, channel, 2825 tx_ring_p->tx_cs.value); 2826 if (status != NXGE_OK) { 2827 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2828 "==> nxge_txdma_start_channel (channel %d)" 2829 " reset channel failed 0x%x", channel, status)); 2830 goto nxge_txdma_start_channel_exit; 2831 } 2832 2833 /* 2834 * Initialize the TXDMA channel specific FZC control 2835 * configurations. These FZC registers are pertaining 2836 * to each TX channel (i.e. logical pages). 2837 */ 2838 if (!isLDOMguest(nxgep)) { 2839 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2840 tx_ring_p, tx_mbox_p); 2841 if (status != NXGE_OK) { 2842 goto nxge_txdma_start_channel_exit; 2843 } 2844 } 2845 2846 /* 2847 * Initialize the event masks. 2848 */ 2849 tx_ring_p->tx_evmask.value = 0; 2850 status = nxge_init_txdma_channel_event_mask(nxgep, 2851 channel, &tx_ring_p->tx_evmask); 2852 if (status != NXGE_OK) { 2853 goto nxge_txdma_start_channel_exit; 2854 } 2855 2856 /* 2857 * Load TXDMA descriptors, buffers, mailbox, 2858 * initialise the DMA channels and 2859 * enable each DMA channel. 2860 */ 2861 status = nxge_enable_txdma_channel(nxgep, channel, 2862 tx_ring_p, tx_mbox_p); 2863 if (status != NXGE_OK) { 2864 goto nxge_txdma_start_channel_exit; 2865 } 2866 2867 nxge_txdma_start_channel_exit: 2868 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2869 2870 return (status); 2871 } 2872 2873 /* 2874 * nxge_txdma_stop_channel 2875 * 2876 * Stop a TDC. 2877 * 2878 * Arguments: 2879 * nxgep 2880 * channel The channel to stop. 2881 * tx_ring_p channel's transmit descriptor ring. 2882 * tx_mbox_p channel' smailbox. 2883 * 2884 * Notes: 2885 * 2886 * NPI/NXGE function calls: 2887 * nxge_txdma_stop_inj_err() 2888 * nxge_reset_txdma_channel() 2889 * nxge_init_txdma_channel_event_mask() 2890 * nxge_init_txdma_channel_cntl_stat() 2891 * nxge_disable_txdma_channel() 2892 * 2893 * Registers accessed: 2894 * none directly (see functions above). 2895 * 2896 * Context: 2897 * Any domain 2898 */ 2899 /*ARGSUSED*/ 2900 static nxge_status_t 2901 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2902 { 2903 p_tx_ring_t tx_ring_p; 2904 int status = NXGE_OK; 2905 2906 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2907 "==> nxge_txdma_stop_channel: channel %d", channel)); 2908 2909 /* 2910 * Stop (disable) TXDMA and TXC (if stop bit is set 2911 * and STOP_N_GO bit not set, the TXDMA reset state will 2912 * not be set if reset TXDMA. 
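 * (That is: the channel must reach the stopped state before the reset
 * below, or the reset state bit may never be set. nxge_txdma_stop_inj_err()
 * appears to force this by injecting an error if the stop-done bit does
 * not set on its own, as nxge_txdma_fixup_hung_channel() does earlier
 * in this file.)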
2913 */ 2914 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2915 2916 tx_ring_p = nxgep->tx_rings->rings[channel]; 2917 2918 /* 2919 * Reset TXDMA channel 2920 */ 2921 tx_ring_p->tx_cs.value = 0; 2922 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2923 status = nxge_reset_txdma_channel(nxgep, channel, 2924 tx_ring_p->tx_cs.value); 2925 if (status != NXGE_OK) { 2926 goto nxge_txdma_stop_channel_exit; 2927 } 2928 2929 #ifdef HARDWARE_REQUIRED 2930 /* Set up the interrupt event masks. */ 2931 tx_ring_p->tx_evmask.value = 0; 2932 status = nxge_init_txdma_channel_event_mask(nxgep, 2933 channel, &tx_ring_p->tx_evmask); 2934 if (status != NXGE_OK) { 2935 goto nxge_txdma_stop_channel_exit; 2936 } 2937 2938 /* Initialize the DMA control and status register */ 2939 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2940 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2941 tx_ring_p->tx_cs.value); 2942 if (status != NXGE_OK) { 2943 goto nxge_txdma_stop_channel_exit; 2944 } 2945 2946 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2947 2948 /* Disable channel */ 2949 status = nxge_disable_txdma_channel(nxgep, channel, 2950 tx_ring_p, tx_mbox_p); 2951 if (status != NXGE_OK) { 2952 goto nxge_txdma_start_channel_exit; 2953 } 2954 2955 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2956 "==> nxge_txdma_stop_channel: event done")); 2957 2958 #endif 2959 2960 nxge_txdma_stop_channel_exit: 2961 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2962 return (status); 2963 } 2964 2965 /* 2966 * nxge_txdma_get_ring 2967 * 2968 * Get the ring for a TDC. 2969 * 2970 * Arguments: 2971 * nxgep 2972 * channel 2973 * 2974 * Notes: 2975 * 2976 * NPI/NXGE function calls: 2977 * 2978 * Registers accessed: 2979 * 2980 * Context: 2981 * Any domain 2982 */ 2983 static p_tx_ring_t 2984 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2985 { 2986 nxge_grp_set_t *set = &nxgep->tx_set; 2987 int tdc; 2988 2989 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2990 2991 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2992 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2993 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 2994 goto return_null; 2995 } 2996 2997 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2998 if ((1 << tdc) & set->owned.map) { 2999 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3000 if (ring) { 3001 if (channel == ring->tdc) { 3002 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3003 "<== nxge_txdma_get_ring: " 3004 "tdc %d ring $%p", tdc, ring)); 3005 return (ring); 3006 } 3007 } 3008 } 3009 } 3010 3011 return_null: 3012 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3013 "ring not found")); 3014 3015 return (NULL); 3016 } 3017 3018 /* 3019 * nxge_txdma_get_mbox 3020 * 3021 * Get the mailbox for a TDC. 
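 *
 * A hypothetical usage sketch (nxge_txdma_fatal_err_recover() below
 * fetches the mailbox this way; the NULL check is illustrative only):
 *
 *	tx_mbox_t *mbox = nxge_txdma_get_mbox(nxgep, channel);
 *	if (mbox == NULL)
 *		return (NXGE_ERROR);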
3022 * 3023 * Arguments: 3024 * nxgep 3025 * channel 3026 * 3027 * Notes: 3028 * 3029 * NPI/NXGE function calls: 3030 * 3031 * Registers accessed: 3032 * 3033 * Context: 3034 * Any domain 3035 */ 3036 static p_tx_mbox_t 3037 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3038 { 3039 nxge_grp_set_t *set = &nxgep->tx_set; 3040 int tdc; 3041 3042 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3043 3044 if (nxgep->tx_mbox_areas_p == 0 || 3045 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3046 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3047 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3048 goto return_null; 3049 } 3050 3051 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3052 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3053 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3054 goto return_null; 3055 } 3056 3057 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3058 if ((1 << tdc) & set->owned.map) { 3059 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3060 if (ring) { 3061 if (channel == ring->tdc) { 3062 tx_mbox_t *mailbox = nxgep-> 3063 tx_mbox_areas_p-> 3064 txmbox_areas_p[tdc]; 3065 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3066 "<== nxge_txdma_get_mbox: tdc %d " 3067 "ring $%p", tdc, mailbox)); 3068 return (mailbox); 3069 } 3070 } 3071 } 3072 } 3073 3074 return_null: 3075 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3076 "mailbox not found")); 3077 3078 return (NULL); 3079 } 3080 3081 /* 3082 * nxge_tx_err_evnts 3083 * 3084 * Recover a TDC. 3085 * 3086 * Arguments: 3087 * nxgep 3088 * index The index to the TDC ring. 3089 * ldvp Used to get the channel number ONLY. 3090 * cs A copy of the bits from TX_CS. 3091 * 3092 * Notes: 3093 * Calling tree: 3094 * nxge_tx_intr() 3095 * 3096 * NPI/NXGE function calls: 3097 * npi_txdma_ring_error_get() 3098 * npi_txdma_inj_par_error_get() 3099 * nxge_txdma_fatal_err_recover() 3100 * 3101 * Registers accessed: 3102 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3103 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3104 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3105 * 3106 * Context: 3107 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
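 *
 * Rough flow of the body: for each error bit set in the TX_CS copy,
 * bump the matching tdc_stats counter and post an FMA ereport; any
 * fatal bit triggers nxge_txdma_fatal_err_recover(), followed by
 * FM_SERVICE_RESTORED() if recovery succeeds.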
3108 */ 3109 /*ARGSUSED*/ 3110 static nxge_status_t 3111 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3112 { 3113 npi_handle_t handle; 3114 npi_status_t rs; 3115 uint8_t channel; 3116 p_tx_ring_t *tx_rings; 3117 p_tx_ring_t tx_ring_p; 3118 p_nxge_tx_ring_stats_t tdc_stats; 3119 boolean_t txchan_fatal = B_FALSE; 3120 nxge_status_t status = NXGE_OK; 3121 tdmc_inj_par_err_t par_err; 3122 uint32_t value; 3123 3124 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3125 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3126 channel = ldvp->channel; 3127 3128 tx_rings = nxgep->tx_rings->rings; 3129 tx_ring_p = tx_rings[index]; 3130 tdc_stats = tx_ring_p->tdc_stats; 3131 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3132 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3133 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3134 if ((rs = npi_txdma_ring_error_get(handle, channel, 3135 &tdc_stats->errlog)) != NPI_SUCCESS) 3136 return (NXGE_ERROR | rs); 3137 } 3138 3139 if (cs.bits.ldw.mbox_err) { 3140 tdc_stats->mbox_err++; 3141 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3142 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3144 "==> nxge_tx_err_evnts(channel %d): " 3145 "fatal error: mailbox", channel)); 3146 txchan_fatal = B_TRUE; 3147 } 3148 if (cs.bits.ldw.pkt_size_err) { 3149 tdc_stats->pkt_size_err++; 3150 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3151 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3153 "==> nxge_tx_err_evnts(channel %d): " 3154 "fatal error: pkt_size_err", channel)); 3155 txchan_fatal = B_TRUE; 3156 } 3157 if (cs.bits.ldw.tx_ring_oflow) { 3158 tdc_stats->tx_ring_oflow++; 3159 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3160 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3161 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3162 "==> nxge_tx_err_evnts(channel %d): " 3163 "fatal error: tx_ring_oflow", channel)); 3164 txchan_fatal = B_TRUE; 3165 } 3166 if (cs.bits.ldw.pref_buf_par_err) { 3167 tdc_stats->pre_buf_par_err++; 3168 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3169 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3171 "==> nxge_tx_err_evnts(channel %d): " 3172 "fatal error: pre_buf_par_err", channel)); 3173 /* Clear error injection source for parity error */ 3174 (void) npi_txdma_inj_par_error_get(handle, &value); 3175 par_err.value = value; 3176 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3177 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3178 txchan_fatal = B_TRUE; 3179 } 3180 if (cs.bits.ldw.nack_pref) { 3181 tdc_stats->nack_pref++; 3182 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3183 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3185 "==> nxge_tx_err_evnts(channel %d): " 3186 "fatal error: nack_pref", channel)); 3187 txchan_fatal = B_TRUE; 3188 } 3189 if (cs.bits.ldw.nack_pkt_rd) { 3190 tdc_stats->nack_pkt_rd++; 3191 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3192 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3193 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3194 "==> nxge_tx_err_evnts(channel %d): " 3195 "fatal error: nack_pkt_rd", channel)); 3196 txchan_fatal = B_TRUE; 3197 } 3198 if (cs.bits.ldw.conf_part_err) { 3199 tdc_stats->conf_part_err++; 3200 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3201 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3202 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3203 "==> 
nxge_tx_err_evnts(channel %d): " 3204 "fatal error: config_partition_err", channel)); 3205 txchan_fatal = B_TRUE; 3206 } 3207 if (cs.bits.ldw.pkt_prt_err) { 3208 tdc_stats->pkt_part_err++; 3209 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3210 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3211 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3212 "==> nxge_tx_err_evnts(channel %d): " 3213 "fatal error: pkt_prt_err", channel)); 3214 txchan_fatal = B_TRUE; 3215 } 3216 3217 /* Clear error injection source in case this is an injected error */ 3218 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3219 3220 if (txchan_fatal) { 3221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3222 " nxge_tx_err_evnts: " 3223 " fatal error on channel %d cs 0x%llx\n", 3224 channel, cs.value)); 3225 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3226 tx_ring_p); 3227 if (status == NXGE_OK) { 3228 FM_SERVICE_RESTORED(nxgep); 3229 } 3230 } 3231 3232 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3233 3234 return (status); 3235 } 3236 3237 static nxge_status_t 3238 nxge_txdma_fatal_err_recover( 3239 p_nxge_t nxgep, 3240 uint16_t channel, 3241 p_tx_ring_t tx_ring_p) 3242 { 3243 npi_handle_t handle; 3244 npi_status_t rs = NPI_SUCCESS; 3245 p_tx_mbox_t tx_mbox_p; 3246 nxge_status_t status = NXGE_OK; 3247 3248 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3250 "Recovering from TxDMAChannel#%d error...", channel)); 3251 3252 /* 3253 * Stop the dma channel waits for the stop done. 3254 * If the stop done bit is not set, then create 3255 * an error. 3256 */ 3257 3258 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3259 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3260 MUTEX_ENTER(&tx_ring_p->lock); 3261 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3262 if (rs != NPI_SUCCESS) { 3263 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3264 "==> nxge_txdma_fatal_err_recover (channel %d): " 3265 "stop failed ", channel)); 3266 goto fail; 3267 } 3268 3269 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3270 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3271 3272 /* 3273 * Reset TXDMA channel 3274 */ 3275 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3276 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3277 NPI_SUCCESS) { 3278 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3279 "==> nxge_txdma_fatal_err_recover (channel %d)" 3280 " reset channel failed 0x%x", channel, rs)); 3281 goto fail; 3282 } 3283 3284 /* 3285 * Reset the tail (kick) register to 0. 3286 * (Hardware will not reset it. Tx overflow fatal 3287 * error if tail is not set to 0 after reset! 3288 */ 3289 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3290 3291 /* Restart TXDMA channel */ 3292 3293 if (!isLDOMguest(nxgep)) { 3294 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3295 3296 // XXX This is a problem in HIO! 3297 /* 3298 * Initialize the TXDMA channel specific FZC control 3299 * configurations. These FZC registers are pertaining 3300 * to each TX channel (i.e. logical pages). 3301 */ 3302 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3303 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3304 tx_ring_p, tx_mbox_p); 3305 if (status != NXGE_OK) 3306 goto fail; 3307 } 3308 3309 /* 3310 * Initialize the event masks. 
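 * (From here on the recovery mirrors nxge_txdma_start_channel():
 *  event mask, then nxge_enable_txdma_channel(); the ring read/write
 *  indices are also reset to 0.)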
3311 */ 3312 tx_ring_p->tx_evmask.value = 0; 3313 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3314 &tx_ring_p->tx_evmask); 3315 if (status != NXGE_OK) 3316 goto fail; 3317 3318 tx_ring_p->wr_index_wrap = B_FALSE; 3319 tx_ring_p->wr_index = 0; 3320 tx_ring_p->rd_index = 0; 3321 3322 /* 3323 * Load TXDMA descriptors, buffers, mailbox, 3324 * initialise the DMA channels and 3325 * enable each DMA channel. 3326 */ 3327 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3328 status = nxge_enable_txdma_channel(nxgep, channel, 3329 tx_ring_p, tx_mbox_p); 3330 MUTEX_EXIT(&tx_ring_p->lock); 3331 if (status != NXGE_OK) 3332 goto fail; 3333 3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3335 "Recovery Successful, TxDMAChannel#%d Restored", 3336 channel)); 3337 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3338 3339 return (NXGE_OK); 3340 3341 fail: 3342 MUTEX_EXIT(&tx_ring_p->lock); 3343 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3344 "nxge_txdma_fatal_err_recover (channel %d): " 3345 "failed to recover this txdma channel", channel)); 3346 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3347 3348 return (status); 3349 } 3350 3351 /* 3352 * nxge_tx_port_fatal_err_recover 3353 * 3354 * Attempt to recover from a fatal port error. 3355 * 3356 * Arguments: 3357 * nxgep 3358 * 3359 * Notes: 3360 * How would a guest do this? 3361 * 3362 * NPI/NXGE function calls: 3363 * 3364 * Registers accessed: 3365 * 3366 * Context: 3367 * Service domain 3368 */ 3369 nxge_status_t 3370 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3371 { 3372 nxge_grp_set_t *set = &nxgep->tx_set; 3373 nxge_channel_t tdc; 3374 3375 tx_ring_t *ring; 3376 tx_mbox_t *mailbox; 3377 3378 npi_handle_t handle; 3379 nxge_status_t status; 3380 npi_status_t rs; 3381 3382 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3384 "Recovering from TxPort error...")); 3385 3386 if (isLDOMguest(nxgep)) { 3387 return (NXGE_OK); 3388 } 3389 3390 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3391 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3392 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3393 return (NXGE_ERROR); 3394 } 3395 3396 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3397 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3398 "<== nxge_tx_port_fatal_err_recover: " 3399 "NULL ring pointer(s)")); 3400 return (NXGE_ERROR); 3401 } 3402 3403 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3404 if ((1 << tdc) & set->owned.map) { 3405 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3406 if (ring) 3407 MUTEX_ENTER(&ring->lock); 3408 } 3409 } 3410 3411 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3412 3413 /* 3414 * Stop all the TDCs owned by us. 3415 * (The shared TDCs will have been stopped by their owners.) 3416 */ 3417 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3418 if ((1 << tdc) & set->owned.map) { 3419 ring = nxgep->tx_rings->rings[tdc]; 3420 if (ring) { 3421 rs = npi_txdma_channel_control 3422 (handle, TXDMA_STOP, tdc); 3423 if (rs != NPI_SUCCESS) { 3424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3425 "nxge_tx_port_fatal_err_recover " 3426 "(channel %d): stop failed ", tdc)); 3427 goto fail; 3428 } 3429 } 3430 } 3431 } 3432 3433 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3434 3435 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3436 if ((1 << tdc) & set->owned.map) { 3437 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3438 if (ring) 3439 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3440 } 3441 } 3442 3443 /* 3444 * Reset all the TDCs. 
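 * (After each TXDMA_RESET the tail/kick register is rewritten to 0 by
 *  hand, since the hardware does not clear it and a stale tail would
 *  trigger a Tx overflow fatal error:
 *	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
 *  as noted in the per-channel recovery path above.)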
3445 */ 3446 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3447 3448 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3449 if ((1 << tdc) & set->owned.map) { 3450 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3451 if (ring) { 3452 if ((rs = npi_txdma_channel_control 3453 (handle, TXDMA_RESET, tdc)) 3454 != NPI_SUCCESS) { 3455 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3456 "nxge_tx_port_fatal_err_recover " 3457 "(channel %d) reset channel " 3458 "failed 0x%x", tdc, rs)); 3459 goto fail; 3460 } 3461 } 3462 /* 3463 * Reset the tail (kick) register to 0. 3464 * (Hardware will not reset it. Tx overflow fatal 3465 * error if tail is not set to 0 after reset! 3466 */ 3467 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3468 } 3469 } 3470 3471 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3472 3473 /* Restart all the TDCs */ 3474 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3475 if ((1 << tdc) & set->owned.map) { 3476 ring = nxgep->tx_rings->rings[tdc]; 3477 if (ring) { 3478 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3479 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3480 ring, mailbox); 3481 ring->tx_evmask.value = 0; 3482 /* 3483 * Initialize the event masks. 3484 */ 3485 status = nxge_init_txdma_channel_event_mask 3486 (nxgep, tdc, &ring->tx_evmask); 3487 3488 ring->wr_index_wrap = B_FALSE; 3489 ring->wr_index = 0; 3490 ring->rd_index = 0; 3491 3492 if (status != NXGE_OK) 3493 goto fail; 3494 if (status != NXGE_OK) 3495 goto fail; 3496 } 3497 } 3498 } 3499 3500 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3501 3502 /* Re-enable all the TDCs */ 3503 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3504 if ((1 << tdc) & set->owned.map) { 3505 ring = nxgep->tx_rings->rings[tdc]; 3506 if (ring) { 3507 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3508 status = nxge_enable_txdma_channel(nxgep, tdc, 3509 ring, mailbox); 3510 if (status != NXGE_OK) 3511 goto fail; 3512 } 3513 } 3514 } 3515 3516 /* 3517 * Unlock all the TDCs. 3518 */ 3519 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3520 if ((1 << tdc) & set->owned.map) { 3521 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3522 if (ring) 3523 MUTEX_EXIT(&ring->lock); 3524 } 3525 } 3526 3527 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3528 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3529 3530 return (NXGE_OK); 3531 3532 fail: 3533 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3534 if ((1 << tdc) & set->owned.map) { 3535 ring = nxgep->tx_rings->rings[tdc]; 3536 if (ring) 3537 MUTEX_EXIT(&ring->lock); 3538 } 3539 } 3540 3541 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3542 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3543 3544 return (status); 3545 } 3546 3547 /* 3548 * nxge_txdma_inject_err 3549 * 3550 * Inject an error into a TDC. 3551 * 3552 * Arguments: 3553 * nxgep 3554 * err_id The error to inject. 3555 * chan The channel to inject into. 3556 * 3557 * Notes: 3558 * This is called from nxge_main.c:nxge_err_inject() 3559 * Has this ioctl ever been used? 
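 *
 * A hypothetical invocation, assuming the ioctl plumbing in
 * nxge_main.c:nxge_err_inject() passes the ereport id straight through:
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_NACK_PREF, chan);
 *
 * which sets the nack_pref bit in TDMC_INTR_DBG for <chan>; the Tx
 * interrupt path should then observe the error in TX_CS and run
 * nxge_tx_err_evnts() / nxge_txdma_fatal_err_recover().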
3560 * 3561 * NPI/NXGE function calls: 3562 * npi_txdma_inj_par_error_get() 3563 * npi_txdma_inj_par_error_set() 3564 * 3565 * Registers accessed: 3566 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3567 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3568 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3569 * 3570 * Context: 3571 * Service domain 3572 */ 3573 void 3574 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3575 { 3576 tdmc_intr_dbg_t tdi; 3577 tdmc_inj_par_err_t par_err; 3578 uint32_t value; 3579 npi_handle_t handle; 3580 3581 switch (err_id) { 3582 3583 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3584 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3585 /* Clear error injection source for parity error */ 3586 (void) npi_txdma_inj_par_error_get(handle, &value); 3587 par_err.value = value; 3588 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3589 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3590 3591 par_err.bits.ldw.inject_parity_error = (1 << chan); 3592 (void) npi_txdma_inj_par_error_get(handle, &value); 3593 par_err.value = value; 3594 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3595 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3596 (unsigned long long)par_err.value); 3597 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3598 break; 3599 3600 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3601 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3602 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3603 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3604 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3605 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3606 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3607 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3608 chan, &tdi.value); 3609 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3610 tdi.bits.ldw.pref_buf_par_err = 1; 3611 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3612 tdi.bits.ldw.mbox_err = 1; 3613 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3614 tdi.bits.ldw.nack_pref = 1; 3615 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3616 tdi.bits.ldw.nack_pkt_rd = 1; 3617 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3618 tdi.bits.ldw.pkt_size_err = 1; 3619 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3620 tdi.bits.ldw.tx_ring_oflow = 1; 3621 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3622 tdi.bits.ldw.conf_part_err = 1; 3623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3624 tdi.bits.ldw.pkt_part_err = 1; 3625 #if defined(__i386) 3626 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3627 tdi.value); 3628 #else 3629 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3630 tdi.value); 3631 #endif 3632 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3633 chan, tdi.value); 3634 3635 break; 3636 } 3637 } 3638