1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_txdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 #include <npi_tx_rd64.h> 30 #include <npi_tx_wr64.h> 31 #include <sys/llc1.h> 32 33 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 34 uint32_t nxge_tx_minfree = 32; 35 uint32_t nxge_tx_intr_thres = 0; 36 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 37 uint32_t nxge_tx_tiny_pack = 1; 38 uint32_t nxge_tx_use_bcopy = 1; 39 40 extern uint32_t nxge_tx_ring_size; 41 extern uint32_t nxge_bcopy_thresh; 42 extern uint32_t nxge_dvma_thresh; 43 extern uint32_t nxge_dma_stream_thresh; 44 extern dma_method_t nxge_force_dma; 45 extern uint32_t nxge_cksum_offload; 46 47 /* Device register access attributes for PIO. */ 48 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 49 /* Device descriptor access attributes for DMA. */ 50 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 51 /* Device buffer access attributes for DMA. 
*/ 52 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 53 extern ddi_dma_attr_t nxge_desc_dma_attr; 54 extern ddi_dma_attr_t nxge_tx_dma_attr; 55 56 extern int nxge_serial_tx(mblk_t *mp, void *arg); 57 58 void nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p); 59 60 static nxge_status_t nxge_map_txdma(p_nxge_t, int); 61 62 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 63 64 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 65 p_nxge_dma_common_t *, p_tx_ring_t *, 66 uint32_t, p_nxge_dma_common_t *, 67 p_tx_mbox_t *); 68 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 69 70 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 71 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 72 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 73 74 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 75 p_nxge_dma_common_t *, p_tx_ring_t, 76 p_tx_mbox_t *); 77 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 78 p_tx_ring_t, p_tx_mbox_t); 79 80 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 81 p_tx_ring_t, p_tx_mbox_t); 82 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 83 84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 86 p_nxge_ldv_t, tx_cs_t); 87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 89 uint16_t, p_tx_ring_t); 90 91 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 92 p_tx_ring_t ring_p, uint16_t channel); 93 94 nxge_status_t 95 nxge_init_txdma_channels(p_nxge_t nxgep) 96 { 97 nxge_grp_set_t *set = &nxgep->tx_set; 98 int i, tdc, count; 99 nxge_grp_t *group; 100 101 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 102 103 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 104 if ((1 << i) & set->lg.map) { 105 group = set->group[i]; 106 107 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 108 if ((1 << tdc) & group->map) { 109 if ((nxge_grp_dc_add(nxgep, group, 110 VP_BOUND_TX, tdc))) 111 goto init_txdma_channels_exit; 112 } 113 } 114 } 115 116 if (++count == set->lg.count) 117 break; 118 } 119 120 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 121 return (NXGE_OK); 122 123 init_txdma_channels_exit: 124 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 125 if ((1 << i) & set->lg.map) { 126 group = set->group[i]; 127 128 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 129 if ((1 << tdc) & group->map) { 130 nxge_grp_dc_remove(nxgep, 131 VP_BOUND_TX, tdc); 132 } 133 } 134 } 135 136 if (++count == set->lg.count) 137 break; 138 } 139 140 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 141 return (NXGE_ERROR); 142 } 143 144 nxge_status_t 145 nxge_init_txdma_channel( 146 p_nxge_t nxge, 147 int channel) 148 { 149 nxge_status_t status; 150 151 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 152 153 status = nxge_map_txdma(nxge, channel); 154 if (status != NXGE_OK) { 155 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 156 "<== nxge_init_txdma_channel: status 0x%x", status)); 157 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 158 return (status); 159 } 160 161 status = nxge_txdma_hw_start(nxge, channel); 162 if (status != NXGE_OK) { 163 (void) nxge_unmap_txdma_channel(nxge, channel); 164 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 165 return (status); 166 } 167 168 if (!nxge->statsp->tdc_ksp[channel]) 169 
nxge_setup_tdc_kstats(nxge, channel); 170 171 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 172 173 return (status); 174 } 175 176 void 177 nxge_uninit_txdma_channels(p_nxge_t nxgep) 178 { 179 nxge_grp_set_t *set = &nxgep->tx_set; 180 int tdc; 181 182 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 183 184 if (set->owned.map == 0) { 185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 186 "nxge_uninit_txdma_channels: no channels")); 187 return; 188 } 189 190 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 191 if ((1 << tdc) & set->owned.map) { 192 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 193 } 194 } 195 196 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 197 } 198 199 void 200 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 201 { 202 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 203 204 if (nxgep->statsp->tdc_ksp[channel]) { 205 kstat_delete(nxgep->statsp->tdc_ksp[channel]); 206 nxgep->statsp->tdc_ksp[channel] = 0; 207 } 208 209 (void) nxge_txdma_stop_channel(nxgep, channel); 210 nxge_unmap_txdma_channel(nxgep, channel); 211 212 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 213 "<== nxge_uninit_txdma_channel")); 214 } 215 216 void 217 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 218 uint32_t entries, uint32_t size) 219 { 220 size_t tsize; 221 *dest_p = *src_p; 222 tsize = size * entries; 223 dest_p->alength = tsize; 224 dest_p->nblocks = entries; 225 dest_p->block_size = size; 226 dest_p->offset += tsize; 227 228 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 229 src_p->alength -= tsize; 230 src_p->dma_cookie.dmac_laddress += tsize; 231 src_p->dma_cookie.dmac_size -= tsize; 232 } 233 234 /* 235 * nxge_reset_txdma_channel 236 * 237 * Reset a TDC. 238 * 239 * Arguments: 240 * nxgep 241 * channel The channel to reset. 242 * reg_data The current TX_CS. 243 * 244 * Notes: 245 * 246 * NPI/NXGE function calls: 247 * npi_txdma_channel_reset() 248 * npi_txdma_channel_control() 249 * 250 * Registers accessed: 251 * TX_CS DMC+0x40028 Transmit Control And Status 252 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 253 * 254 * Context: 255 * Any domain 256 */ 257 nxge_status_t 258 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 259 { 260 npi_status_t rs = NPI_SUCCESS; 261 nxge_status_t status = NXGE_OK; 262 npi_handle_t handle; 263 264 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 265 266 handle = NXGE_DEV_NPI_HANDLE(nxgep); 267 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 268 rs = npi_txdma_channel_reset(handle, channel); 269 } else { 270 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 271 channel); 272 } 273 274 if (rs != NPI_SUCCESS) { 275 status = NXGE_ERROR | rs; 276 } 277 278 /* 279 * Reset the tail (kick) register to 0. 280 * (Hardware will not reset it. Tx overflow fatal 281 * error if tail is not set to 0 after reset! 282 */ 283 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 284 285 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 286 return (status); 287 } 288 289 /* 290 * nxge_init_txdma_channel_event_mask 291 * 292 * Enable interrupts for a set of events. 293 * 294 * Arguments: 295 * nxgep 296 * channel The channel to map. 297 * mask_p The events to enable. 
298 * 299 * Notes: 300 * 301 * NPI/NXGE function calls: 302 * npi_txdma_event_mask() 303 * 304 * Registers accessed: 305 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 306 * 307 * Context: 308 * Any domain 309 */ 310 nxge_status_t 311 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 312 p_tx_dma_ent_msk_t mask_p) 313 { 314 npi_handle_t handle; 315 npi_status_t rs = NPI_SUCCESS; 316 nxge_status_t status = NXGE_OK; 317 318 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 319 "<== nxge_init_txdma_channel_event_mask")); 320 321 handle = NXGE_DEV_NPI_HANDLE(nxgep); 322 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 323 if (rs != NPI_SUCCESS) { 324 status = NXGE_ERROR | rs; 325 } 326 327 return (status); 328 } 329 330 /* 331 * nxge_init_txdma_channel_cntl_stat 332 * 333 * Stop a TDC. If at first we don't succeed, inject an error. 334 * 335 * Arguments: 336 * nxgep 337 * channel The channel to stop. 338 * 339 * Notes: 340 * 341 * NPI/NXGE function calls: 342 * npi_txdma_control_status() 343 * 344 * Registers accessed: 345 * TX_CS DMC+0x40028 Transmit Control And Status 346 * 347 * Context: 348 * Any domain 349 */ 350 nxge_status_t 351 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 352 uint64_t reg_data) 353 { 354 npi_handle_t handle; 355 npi_status_t rs = NPI_SUCCESS; 356 nxge_status_t status = NXGE_OK; 357 358 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 359 "<== nxge_init_txdma_channel_cntl_stat")); 360 361 handle = NXGE_DEV_NPI_HANDLE(nxgep); 362 rs = npi_txdma_control_status(handle, OP_SET, channel, 363 (p_tx_cs_t)®_data); 364 365 if (rs != NPI_SUCCESS) { 366 status = NXGE_ERROR | rs; 367 } 368 369 return (status); 370 } 371 372 /* 373 * nxge_enable_txdma_channel 374 * 375 * Enable a TDC. 376 * 377 * Arguments: 378 * nxgep 379 * channel The channel to enable. 380 * tx_desc_p channel's transmit descriptor ring. 381 * mbox_p channel's mailbox, 382 * 383 * Notes: 384 * 385 * NPI/NXGE function calls: 386 * npi_txdma_ring_config() 387 * npi_txdma_mbox_config() 388 * npi_txdma_channel_init_enable() 389 * 390 * Registers accessed: 391 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 392 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 393 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 394 * TX_CS DMC+0x40028 Transmit Control And Status 395 * 396 * Context: 397 * Any domain 398 */ 399 nxge_status_t 400 nxge_enable_txdma_channel(p_nxge_t nxgep, 401 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 402 { 403 npi_handle_t handle; 404 npi_status_t rs = NPI_SUCCESS; 405 nxge_status_t status = NXGE_OK; 406 407 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 408 409 handle = NXGE_DEV_NPI_HANDLE(nxgep); 410 /* 411 * Use configuration data composed at init time. 412 * Write to hardware the transmit ring configurations. 413 */ 414 rs = npi_txdma_ring_config(handle, OP_SET, channel, 415 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 416 417 if (rs != NPI_SUCCESS) { 418 return (NXGE_ERROR | rs); 419 } 420 421 if (isLDOMguest(nxgep)) { 422 /* Add interrupt handler for this channel. */ 423 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK) 424 return (NXGE_ERROR); 425 } 426 427 /* Write to hardware the mailbox */ 428 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 429 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 430 431 if (rs != NPI_SUCCESS) { 432 return (NXGE_ERROR | rs); 433 } 434 435 /* Start the DMA engine. 
*/ 436 rs = npi_txdma_channel_init_enable(handle, channel); 437 438 if (rs != NPI_SUCCESS) { 439 return (NXGE_ERROR | rs); 440 } 441 442 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 443 444 return (status); 445 } 446 447 void 448 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 449 boolean_t l4_cksum, int pkt_len, uint8_t npads, 450 p_tx_pkt_hdr_all_t pkthdrp, 451 t_uscalar_t start_offset, 452 t_uscalar_t stuff_offset) 453 { 454 p_tx_pkt_header_t hdrp; 455 p_mblk_t nmp; 456 uint64_t tmp; 457 size_t mblk_len; 458 size_t iph_len; 459 size_t hdrs_size; 460 uint8_t hdrs_buf[sizeof (struct ether_header) + 461 64 + sizeof (uint32_t)]; 462 uint8_t *cursor; 463 uint8_t *ip_buf; 464 uint16_t eth_type; 465 uint8_t ipproto; 466 boolean_t is_vlan = B_FALSE; 467 size_t eth_hdr_size; 468 469 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 470 471 /* 472 * Caller should zero out the headers first. 473 */ 474 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 475 476 if (fill_len) { 477 NXGE_DEBUG_MSG((NULL, TX_CTL, 478 "==> nxge_fill_tx_hdr: pkt_len %d " 479 "npads %d", pkt_len, npads)); 480 tmp = (uint64_t)pkt_len; 481 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 482 goto fill_tx_header_done; 483 } 484 485 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 486 487 /* 488 * mp is the original data packet (does not include the 489 * Neptune transmit header). 490 */ 491 nmp = mp; 492 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 493 "mp $%p b_rptr $%p len %d", 494 mp, nmp->b_rptr, MBLKL(nmp))); 495 /* copy ether_header from mblk to hdrs_buf */ 496 cursor = &hdrs_buf[0]; 497 tmp = sizeof (struct ether_vlan_header); 498 while ((nmp != NULL) && (tmp > 0)) { 499 size_t buflen; 500 mblk_len = MBLKL(nmp); 501 buflen = min((size_t)tmp, mblk_len); 502 bcopy(nmp->b_rptr, cursor, buflen); 503 cursor += buflen; 504 tmp -= buflen; 505 nmp = nmp->b_cont; 506 } 507 508 nmp = mp; 509 mblk_len = MBLKL(nmp); 510 ip_buf = NULL; 511 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 512 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 513 "ether type 0x%x", eth_type, hdrp->value)); 514 515 if (eth_type < ETHERMTU) { 516 tmp = 1ull; 517 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 518 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 519 "value 0x%llx", hdrp->value)); 520 if (*(hdrs_buf + sizeof (struct ether_header)) 521 == LLC_SNAP_SAP) { 522 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 523 sizeof (struct ether_header) + 6))); 524 NXGE_DEBUG_MSG((NULL, TX_CTL, 525 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 526 eth_type)); 527 } else { 528 goto fill_tx_header_done; 529 } 530 } else if (eth_type == VLAN_ETHERTYPE) { 531 tmp = 1ull; 532 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 533 534 eth_type = ntohs(((struct ether_vlan_header *) 535 hdrs_buf)->ether_type); 536 is_vlan = B_TRUE; 537 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 538 "value 0x%llx", hdrp->value)); 539 } 540 541 if (!is_vlan) { 542 eth_hdr_size = sizeof (struct ether_header); 543 } else { 544 eth_hdr_size = sizeof (struct ether_vlan_header); 545 } 546 547 switch (eth_type) { 548 case ETHERTYPE_IP: 549 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 550 ip_buf = nmp->b_rptr + eth_hdr_size; 551 mblk_len -= eth_hdr_size; 552 iph_len = ((*ip_buf) & 0x0f); 553 if (mblk_len > (iph_len + sizeof (uint32_t))) { 554 ip_buf = nmp->b_rptr; 555 ip_buf += eth_hdr_size; 556 } else { 557 ip_buf = NULL; 558 } 559 560 } 561 if 
(ip_buf == NULL) { 562 hdrs_size = 0; 563 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 564 while ((nmp) && (hdrs_size < 565 sizeof (hdrs_buf))) { 566 mblk_len = (size_t)nmp->b_wptr - 567 (size_t)nmp->b_rptr; 568 if (mblk_len >= 569 (sizeof (hdrs_buf) - hdrs_size)) 570 mblk_len = sizeof (hdrs_buf) - 571 hdrs_size; 572 bcopy(nmp->b_rptr, 573 &hdrs_buf[hdrs_size], mblk_len); 574 hdrs_size += mblk_len; 575 nmp = nmp->b_cont; 576 } 577 ip_buf = hdrs_buf; 578 ip_buf += eth_hdr_size; 579 iph_len = ((*ip_buf) & 0x0f); 580 } 581 582 ipproto = ip_buf[9]; 583 584 tmp = (uint64_t)iph_len; 585 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 586 tmp = (uint64_t)(eth_hdr_size >> 1); 587 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 588 589 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 590 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 591 "tmp 0x%x", 592 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 593 ipproto, tmp)); 594 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 595 "value 0x%llx", hdrp->value)); 596 597 break; 598 599 case ETHERTYPE_IPV6: 600 hdrs_size = 0; 601 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 602 while ((nmp) && (hdrs_size < 603 sizeof (hdrs_buf))) { 604 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 605 if (mblk_len >= 606 (sizeof (hdrs_buf) - hdrs_size)) 607 mblk_len = sizeof (hdrs_buf) - 608 hdrs_size; 609 bcopy(nmp->b_rptr, 610 &hdrs_buf[hdrs_size], mblk_len); 611 hdrs_size += mblk_len; 612 nmp = nmp->b_cont; 613 } 614 ip_buf = hdrs_buf; 615 ip_buf += eth_hdr_size; 616 617 tmp = 1ull; 618 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 619 620 tmp = (eth_hdr_size >> 1); 621 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 622 623 /* byte 6 is the next header protocol */ 624 ipproto = ip_buf[6]; 625 626 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 627 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 628 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 629 ipproto)); 630 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 631 "value 0x%llx", hdrp->value)); 632 633 break; 634 635 default: 636 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 637 goto fill_tx_header_done; 638 } 639 640 switch (ipproto) { 641 case IPPROTO_TCP: 642 NXGE_DEBUG_MSG((NULL, TX_CTL, 643 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 644 if (l4_cksum) { 645 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 646 hdrp->value |= 647 (((uint64_t)(start_offset >> 1)) << 648 TX_PKT_HEADER_L4START_SHIFT); 649 hdrp->value |= 650 (((uint64_t)(stuff_offset >> 1)) << 651 TX_PKT_HEADER_L4STUFF_SHIFT); 652 653 NXGE_DEBUG_MSG((NULL, TX_CTL, 654 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 655 "value 0x%llx", hdrp->value)); 656 } 657 658 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 659 "value 0x%llx", hdrp->value)); 660 break; 661 662 case IPPROTO_UDP: 663 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 664 if (l4_cksum) { 665 if (!nxge_cksum_offload) { 666 uint16_t *up; 667 uint16_t cksum; 668 t_uscalar_t stuff_len; 669 670 /* 671 * The checksum field has the 672 * partial checksum. 673 * IP_CSUM() macro calls ip_cksum() which 674 * can add in the partial checksum. 
675 */ 676 cksum = IP_CSUM(mp, start_offset, 0); 677 stuff_len = stuff_offset; 678 nmp = mp; 679 mblk_len = MBLKL(nmp); 680 while ((nmp != NULL) && 681 (mblk_len < stuff_len)) { 682 stuff_len -= mblk_len; 683 nmp = nmp->b_cont; 684 } 685 ASSERT(nmp); 686 up = (uint16_t *)(nmp->b_rptr + stuff_len); 687 688 *up = cksum; 689 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 690 NXGE_DEBUG_MSG((NULL, TX_CTL, 691 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 692 "use sw cksum " 693 "write to $%p cksum 0x%x content up 0x%x", 694 stuff_len, 695 up, 696 cksum, 697 *up)); 698 } else { 699 /* Hardware will compute the full checksum */ 700 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 701 hdrp->value |= 702 (((uint64_t)(start_offset >> 1)) << 703 TX_PKT_HEADER_L4START_SHIFT); 704 hdrp->value |= 705 (((uint64_t)(stuff_offset >> 1)) << 706 TX_PKT_HEADER_L4STUFF_SHIFT); 707 708 NXGE_DEBUG_MSG((NULL, TX_CTL, 709 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 710 " use partial checksum " 711 "cksum 0x%x ", 712 "value 0x%llx", 713 stuff_offset, 714 IP_CSUM(mp, start_offset, 0), 715 hdrp->value)); 716 } 717 } 718 719 NXGE_DEBUG_MSG((NULL, TX_CTL, 720 "==> nxge_tx_pkt_hdr_init: UDP" 721 "value 0x%llx", hdrp->value)); 722 break; 723 724 default: 725 goto fill_tx_header_done; 726 } 727 728 fill_tx_header_done: 729 NXGE_DEBUG_MSG((NULL, TX_CTL, 730 "==> nxge_fill_tx_hdr: pkt_len %d " 731 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 732 733 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 734 } 735 736 /*ARGSUSED*/ 737 p_mblk_t 738 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 739 { 740 p_mblk_t newmp = NULL; 741 742 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 743 NXGE_DEBUG_MSG((NULL, TX_CTL, 744 "<== nxge_tx_pkt_header_reserve: allocb failed")); 745 return (NULL); 746 } 747 748 NXGE_DEBUG_MSG((NULL, TX_CTL, 749 "==> nxge_tx_pkt_header_reserve: get new mp")); 750 DB_TYPE(newmp) = M_DATA; 751 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 752 linkb(newmp, mp); 753 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 754 755 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 756 "b_rptr $%p b_wptr $%p", 757 newmp->b_rptr, newmp->b_wptr)); 758 759 NXGE_DEBUG_MSG((NULL, TX_CTL, 760 "<== nxge_tx_pkt_header_reserve: use new mp")); 761 762 return (newmp); 763 } 764 765 int 766 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 767 { 768 uint_t nmblks; 769 ssize_t len; 770 uint_t pkt_len; 771 p_mblk_t nmp, bmp, tmp; 772 uint8_t *b_wptr; 773 774 NXGE_DEBUG_MSG((NULL, TX_CTL, 775 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 776 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 777 778 nmp = mp; 779 bmp = mp; 780 nmblks = 0; 781 pkt_len = 0; 782 *tot_xfer_len_p = 0; 783 784 while (nmp) { 785 len = MBLKL(nmp); 786 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 787 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 788 len, pkt_len, nmblks, 789 *tot_xfer_len_p)); 790 791 if (len <= 0) { 792 bmp = nmp; 793 nmp = nmp->b_cont; 794 NXGE_DEBUG_MSG((NULL, TX_CTL, 795 "==> nxge_tx_pkt_nmblocks: " 796 "len (0) pkt_len %d nmblks %d", 797 pkt_len, nmblks)); 798 continue; 799 } 800 801 *tot_xfer_len_p += len; 802 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 803 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 804 len, pkt_len, nmblks, 805 *tot_xfer_len_p)); 806 807 if (len < nxge_bcopy_thresh) { 808 NXGE_DEBUG_MSG((NULL, TX_CTL, 809 "==> nxge_tx_pkt_nmblocks: " 810 "len %d (< thresh) pkt_len %d nmblks %d", 811 len, pkt_len, nmblks)); 812 if (pkt_len == 0) 813 nmblks++; 814 
pkt_len += len; 815 if (pkt_len >= nxge_bcopy_thresh) { 816 pkt_len = 0; 817 len = 0; 818 nmp = bmp; 819 } 820 } else { 821 NXGE_DEBUG_MSG((NULL, TX_CTL, 822 "==> nxge_tx_pkt_nmblocks: " 823 "len %d (> thresh) pkt_len %d nmblks %d", 824 len, pkt_len, nmblks)); 825 pkt_len = 0; 826 nmblks++; 827 /* 828 * Hardware limits the transfer length to 4K. 829 * If len is more than 4K, we need to break 830 * it up to at most 2 more blocks. 831 */ 832 if (len > TX_MAX_TRANSFER_LENGTH) { 833 uint32_t nsegs; 834 835 nsegs = 1; 836 NXGE_DEBUG_MSG((NULL, TX_CTL, 837 "==> nxge_tx_pkt_nmblocks: " 838 "len %d pkt_len %d nmblks %d nsegs %d", 839 len, pkt_len, nmblks, nsegs)); 840 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 841 ++nsegs; 842 } 843 do { 844 b_wptr = nmp->b_rptr + 845 TX_MAX_TRANSFER_LENGTH; 846 nmp->b_wptr = b_wptr; 847 if ((tmp = dupb(nmp)) == NULL) { 848 return (0); 849 } 850 tmp->b_rptr = b_wptr; 851 tmp->b_wptr = nmp->b_wptr; 852 tmp->b_cont = nmp->b_cont; 853 nmp->b_cont = tmp; 854 nmblks++; 855 if (--nsegs) { 856 nmp = tmp; 857 } 858 } while (nsegs); 859 nmp = tmp; 860 } 861 } 862 863 /* 864 * Hardware limits the transmit gather pointers to 15. 865 */ 866 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 867 TX_MAX_GATHER_POINTERS) { 868 NXGE_DEBUG_MSG((NULL, TX_CTL, 869 "==> nxge_tx_pkt_nmblocks: pull msg - " 870 "len %d pkt_len %d nmblks %d", 871 len, pkt_len, nmblks)); 872 /* Pull all message blocks from b_cont */ 873 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 874 return (0); 875 } 876 freemsg(nmp->b_cont); 877 nmp->b_cont = tmp; 878 pkt_len = 0; 879 } 880 bmp = nmp; 881 nmp = nmp->b_cont; 882 } 883 884 NXGE_DEBUG_MSG((NULL, TX_CTL, 885 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 886 "nmblks %d len %d tot_xfer_len %d", 887 mp->b_rptr, mp->b_wptr, nmblks, 888 MBLKL(mp), *tot_xfer_len_p)); 889 890 return (nmblks); 891 } 892 893 static void 894 nxge_txdma_freemsg_list_add(p_tx_ring_t tx_ring_p, p_tx_msg_t msgp) 895 { 896 MUTEX_ENTER(&tx_ring_p->freelock); 897 if (tx_ring_p->tx_free_list_p != NULL) 898 msgp->nextp = tx_ring_p->tx_free_list_p; 899 tx_ring_p->tx_free_list_p = msgp; 900 MUTEX_EXIT(&tx_ring_p->freelock); 901 } 902 903 /* 904 * void 905 * nxge_txdma_freemsg_task() -- walk the list of messages to be 906 * freed and free the messages. 
907 */ 908 void 909 nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p) 910 { 911 p_tx_msg_t msgp, nextp; 912 913 if (tx_ring_p->tx_free_list_p != NULL) { 914 MUTEX_ENTER(&tx_ring_p->freelock); 915 msgp = tx_ring_p->tx_free_list_p; 916 tx_ring_p->tx_free_list_p = (p_tx_msg_t)NULL; 917 MUTEX_EXIT(&tx_ring_p->freelock); 918 919 while (msgp != NULL) { 920 nextp = msgp->nextp; 921 if (msgp->tx_message != NULL) { 922 freemsg(msgp->tx_message); 923 msgp->tx_message = NULL; 924 } 925 msgp->nextp = NULL; 926 msgp = nextp; 927 } 928 } 929 } 930 931 boolean_t 932 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 933 { 934 boolean_t status = B_TRUE; 935 p_nxge_dma_common_t tx_desc_dma_p; 936 nxge_dma_common_t desc_area; 937 p_tx_desc_t tx_desc_ring_vp; 938 p_tx_desc_t tx_desc_p; 939 p_tx_desc_t tx_desc_pp; 940 tx_desc_t r_tx_desc; 941 p_tx_msg_t tx_msg_ring; 942 p_tx_msg_t tx_msg_p; 943 npi_handle_t handle; 944 tx_ring_hdl_t tx_head; 945 uint32_t pkt_len; 946 uint_t tx_rd_index; 947 uint16_t head_index, tail_index; 948 uint8_t tdc; 949 boolean_t head_wrap, tail_wrap; 950 p_nxge_tx_ring_stats_t tdc_stats; 951 int rc; 952 953 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 954 955 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 956 (nmblks != 0)); 957 NXGE_DEBUG_MSG((nxgep, TX_CTL, 958 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 959 tx_ring_p->descs_pending, nxge_reclaim_pending, 960 nmblks)); 961 if (!status) { 962 tx_desc_dma_p = &tx_ring_p->tdc_desc; 963 desc_area = tx_ring_p->tdc_desc; 964 handle = NXGE_DEV_NPI_HANDLE(nxgep); 965 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 966 tx_desc_ring_vp = 967 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 968 tx_rd_index = tx_ring_p->rd_index; 969 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 970 tx_msg_ring = tx_ring_p->tx_msg_ring; 971 tx_msg_p = &tx_msg_ring[tx_rd_index]; 972 tdc = tx_ring_p->tdc; 973 tdc_stats = tx_ring_p->tdc_stats; 974 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 975 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 976 } 977 978 tail_index = tx_ring_p->wr_index; 979 tail_wrap = tx_ring_p->wr_index_wrap; 980 981 NXGE_DEBUG_MSG((nxgep, TX_CTL, 982 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 983 "tail_index %d tail_wrap %d " 984 "tx_desc_p $%p ($%p) ", 985 tdc, tx_rd_index, tail_index, tail_wrap, 986 tx_desc_p, (*(uint64_t *)tx_desc_p))); 987 /* 988 * Read the hardware maintained transmit head 989 * and wrap around bit. 
990 */ 991 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 992 head_index = tx_head.bits.ldw.head; 993 head_wrap = tx_head.bits.ldw.wrap; 994 NXGE_DEBUG_MSG((nxgep, TX_CTL, 995 "==> nxge_txdma_reclaim: " 996 "tx_rd_index %d tail %d tail_wrap %d " 997 "head %d wrap %d", 998 tx_rd_index, tail_index, tail_wrap, 999 head_index, head_wrap)); 1000 1001 if (head_index == tail_index) { 1002 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1003 tail_index, tail_wrap) && 1004 (head_index == tx_rd_index)) { 1005 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1006 "==> nxge_txdma_reclaim: EMPTY")); 1007 return (B_TRUE); 1008 } 1009 1010 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1011 "==> nxge_txdma_reclaim: Checking " 1012 "if ring full")); 1013 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1014 tail_wrap)) { 1015 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1016 "==> nxge_txdma_reclaim: full")); 1017 return (B_FALSE); 1018 } 1019 } 1020 1021 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1022 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 1023 1024 tx_desc_pp = &r_tx_desc; 1025 while ((tx_rd_index != head_index) && 1026 (tx_ring_p->descs_pending != 0)) { 1027 1028 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1029 "==> nxge_txdma_reclaim: Checking if pending")); 1030 1031 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1032 "==> nxge_txdma_reclaim: " 1033 "descs_pending %d ", 1034 tx_ring_p->descs_pending)); 1035 1036 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1037 "==> nxge_txdma_reclaim: " 1038 "(tx_rd_index %d head_index %d " 1039 "(tx_desc_p $%p)", 1040 tx_rd_index, head_index, 1041 tx_desc_p)); 1042 1043 tx_desc_pp->value = tx_desc_p->value; 1044 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1045 "==> nxge_txdma_reclaim: " 1046 "(tx_rd_index %d head_index %d " 1047 "tx_desc_p $%p (desc value 0x%llx) ", 1048 tx_rd_index, head_index, 1049 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 1050 1051 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1052 "==> nxge_txdma_reclaim: dump desc:")); 1053 1054 pkt_len = tx_desc_pp->bits.hdw.tr_len; 1055 tdc_stats->obytes += pkt_len; 1056 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 1057 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1058 "==> nxge_txdma_reclaim: pkt_len %d " 1059 "tdc channel %d opackets %d", 1060 pkt_len, 1061 tdc, 1062 tdc_stats->opackets)); 1063 1064 if (tx_msg_p->flags.dma_type == USE_DVMA) { 1065 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1066 "tx_desc_p = $%p " 1067 "tx_desc_pp = $%p " 1068 "index = %d", 1069 tx_desc_p, 1070 tx_desc_pp, 1071 tx_ring_p->rd_index)); 1072 (void) dvma_unload(tx_msg_p->dvma_handle, 1073 0, -1); 1074 tx_msg_p->dvma_handle = NULL; 1075 if (tx_ring_p->dvma_wr_index == 1076 tx_ring_p->dvma_wrap_mask) { 1077 tx_ring_p->dvma_wr_index = 0; 1078 } else { 1079 tx_ring_p->dvma_wr_index++; 1080 } 1081 tx_ring_p->dvma_pending--; 1082 } else if (tx_msg_p->flags.dma_type == 1083 USE_DMA) { 1084 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1085 "==> nxge_txdma_reclaim: " 1086 "USE DMA")); 1087 if (rc = ddi_dma_unbind_handle 1088 (tx_msg_p->dma_handle)) { 1089 cmn_err(CE_WARN, "!nxge_reclaim: " 1090 "ddi_dma_unbind_handle " 1091 "failed. status %d", rc); 1092 } 1093 } 1094 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1095 "==> nxge_txdma_reclaim: count packets")); 1096 1097 /* 1098 * count a chained packet only once. 
1099 */ 1100 if (tx_msg_p->tx_message != NULL) { 1101 nxge_txdma_freemsg_list_add(tx_ring_p, 1102 tx_msg_p); 1103 } 1104 1105 tx_msg_p->flags.dma_type = USE_NONE; 1106 tx_rd_index = tx_ring_p->rd_index; 1107 tx_rd_index = (tx_rd_index + 1) & 1108 tx_ring_p->tx_wrap_mask; 1109 tx_ring_p->rd_index = tx_rd_index; 1110 tx_ring_p->descs_pending--; 1111 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 1112 tx_msg_p = &tx_msg_ring[tx_rd_index]; 1113 } 1114 1115 status = (nmblks <= (tx_ring_p->tx_ring_size - 1116 tx_ring_p->descs_pending - 1117 TX_FULL_MARK)); 1118 if (status) { 1119 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 1120 } 1121 } else { 1122 status = (nmblks <= 1123 (tx_ring_p->tx_ring_size - 1124 tx_ring_p->descs_pending - 1125 TX_FULL_MARK)); 1126 } 1127 1128 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1129 "<== nxge_txdma_reclaim status = 0x%08x", status)); 1130 1131 return (status); 1132 } 1133 1134 /* 1135 * nxge_tx_intr 1136 * 1137 * Process a TDC interrupt 1138 * 1139 * Arguments: 1140 * arg1 A Logical Device state Vector (LSV) data structure. 1141 * arg2 nxge_t * 1142 * 1143 * Notes: 1144 * 1145 * NPI/NXGE function calls: 1146 * npi_txdma_control_status() 1147 * npi_intr_ldg_mgmt_set() 1148 * 1149 * nxge_tx_err_evnts() 1150 * nxge_txdma_reclaim() 1151 * 1152 * Registers accessed: 1153 * TX_CS DMC+0x40028 Transmit Control And Status 1154 * PIO_LDSV 1155 * 1156 * Context: 1157 * Any domain 1158 */ 1159 uint_t 1160 nxge_tx_intr(void *arg1, void *arg2) 1161 { 1162 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1163 p_nxge_t nxgep = (p_nxge_t)arg2; 1164 p_nxge_ldg_t ldgp; 1165 uint8_t channel; 1166 uint32_t vindex; 1167 npi_handle_t handle; 1168 tx_cs_t cs; 1169 p_tx_ring_t *tx_rings; 1170 p_tx_ring_t tx_ring_p; 1171 npi_status_t rs = NPI_SUCCESS; 1172 uint_t serviced = DDI_INTR_UNCLAIMED; 1173 nxge_status_t status = NXGE_OK; 1174 1175 if (ldvp == NULL) { 1176 NXGE_DEBUG_MSG((NULL, INT_CTL, 1177 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 1178 nxgep, ldvp)); 1179 return (DDI_INTR_UNCLAIMED); 1180 } 1181 1182 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1183 nxgep = ldvp->nxgep; 1184 } 1185 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1186 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 1187 nxgep, ldvp)); 1188 1189 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1190 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1191 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1192 "<== nxge_tx_intr: interface not started or intialized")); 1193 return (DDI_INTR_CLAIMED); 1194 } 1195 1196 /* 1197 * This interrupt handler is for a specific 1198 * transmit dma channel. 1199 */ 1200 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1201 /* Get the control and status for this channel. 
*/ 1202 channel = ldvp->channel; 1203 ldgp = ldvp->ldgp; 1204 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1205 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 1206 "channel %d", 1207 nxgep, ldvp, channel)); 1208 1209 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 1210 vindex = ldvp->vdma_index; 1211 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1212 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 1213 channel, vindex, rs)); 1214 if (!rs && cs.bits.ldw.mk) { 1215 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1216 "==> nxge_tx_intr:channel %d ring index %d " 1217 "status 0x%08x (mk bit set)", 1218 channel, vindex, rs)); 1219 tx_rings = nxgep->tx_rings->rings; 1220 tx_ring_p = tx_rings[vindex]; 1221 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1222 "==> nxge_tx_intr:channel %d ring index %d " 1223 "status 0x%08x (mk bit set, calling reclaim)", 1224 channel, vindex, rs)); 1225 1226 MUTEX_ENTER(&tx_ring_p->lock); 1227 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 1228 MUTEX_EXIT(&tx_ring_p->lock); 1229 1230 nxge_txdma_freemsg_task(tx_ring_p); 1231 1232 mac_tx_update(nxgep->mach); 1233 } 1234 1235 /* 1236 * Process other transmit control and status. 1237 * Check the ldv state. 1238 */ 1239 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1240 /* 1241 * Rearm this logical group if this is a single device 1242 * group. 1243 */ 1244 if (ldgp->nldvs == 1) { 1245 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1246 "==> nxge_tx_intr: rearm")); 1247 if (status == NXGE_OK) { 1248 if (isLDOMguest(nxgep)) { 1249 nxge_hio_ldgimgn(nxgep, ldgp); 1250 } else { 1251 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 1252 B_TRUE, ldgp->ldg_timer); 1253 } 1254 } 1255 } 1256 1257 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 1258 serviced = DDI_INTR_CLAIMED; 1259 return (serviced); 1260 } 1261 1262 void 1263 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 1264 { 1265 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 1266 1267 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1268 1269 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 1270 } 1271 1272 void 1273 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 1274 { 1275 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 1276 1277 (void) nxge_txdma_stop(nxgep); 1278 1279 (void) nxge_fixup_txdma_rings(nxgep); 1280 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1281 (void) nxge_tx_mac_enable(nxgep); 1282 (void) nxge_txdma_hw_kick(nxgep); 1283 1284 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 1285 } 1286 1287 npi_status_t 1288 nxge_txdma_channel_disable( 1289 nxge_t *nxge, 1290 int channel) 1291 { 1292 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1293 npi_status_t rs; 1294 tdmc_intr_dbg_t intr_dbg; 1295 1296 /* 1297 * Stop the dma channel and wait for the stop-done. 1298 * If the stop-done bit is not present, then force 1299 * an error so TXC will stop. 1300 * All channels bound to this port need to be stopped 1301 * and reset after injecting an interrupt error. 
1302 */ 1303 rs = npi_txdma_channel_disable(handle, channel); 1304 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1305 "==> nxge_txdma_channel_disable(%d) " 1306 "rs 0x%x", channel, rs)); 1307 if (rs != NPI_SUCCESS) { 1308 /* Inject any error */ 1309 intr_dbg.value = 0; 1310 intr_dbg.bits.ldw.nack_pref = 1; 1311 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1312 "==> nxge_txdma_hw_mode: " 1313 "channel %d (stop failed 0x%x) " 1314 "(inject err)", rs, channel)); 1315 (void) npi_txdma_inj_int_error_set( 1316 handle, channel, &intr_dbg); 1317 rs = npi_txdma_channel_disable(handle, channel); 1318 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1319 "==> nxge_txdma_hw_mode: " 1320 "channel %d (stop again 0x%x) " 1321 "(after inject err)", 1322 rs, channel)); 1323 } 1324 1325 return (rs); 1326 } 1327 1328 /* 1329 * nxge_txdma_hw_mode 1330 * 1331 * Toggle all TDCs on (enable) or off (disable). 1332 * 1333 * Arguments: 1334 * nxgep 1335 * enable Enable or disable a TDC. 1336 * 1337 * Notes: 1338 * 1339 * NPI/NXGE function calls: 1340 * npi_txdma_channel_enable(TX_CS) 1341 * npi_txdma_channel_disable(TX_CS) 1342 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1343 * 1344 * Registers accessed: 1345 * TX_CS DMC+0x40028 Transmit Control And Status 1346 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1347 * 1348 * Context: 1349 * Any domain 1350 */ 1351 nxge_status_t 1352 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1353 { 1354 nxge_grp_set_t *set = &nxgep->tx_set; 1355 1356 npi_handle_t handle; 1357 nxge_status_t status; 1358 npi_status_t rs; 1359 int tdc; 1360 1361 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1362 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1363 1364 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1365 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1366 "<== nxge_txdma_mode: not initialized")); 1367 return (NXGE_ERROR); 1368 } 1369 1370 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1371 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1372 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1373 return (NXGE_ERROR); 1374 } 1375 1376 /* Enable or disable all of the TDCs owned by us. */ 1377 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1378 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1379 if ((1 << tdc) & set->owned.map) { 1380 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1381 if (ring) { 1382 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1383 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1384 if (enable) { 1385 rs = npi_txdma_channel_enable 1386 (handle, tdc); 1387 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1388 "==> nxge_txdma_hw_mode: " 1389 "channel %d (enable) rs 0x%x", 1390 tdc, rs)); 1391 } else { 1392 rs = nxge_txdma_channel_disable 1393 (nxgep, tdc); 1394 } 1395 } 1396 } 1397 } 1398 1399 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1400 1401 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1402 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1403 1404 return (status); 1405 } 1406 1407 void 1408 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1409 { 1410 npi_handle_t handle; 1411 1412 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1413 "==> nxge_txdma_enable_channel: channel %d", channel)); 1414 1415 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1416 /* enable the transmit dma channels */ 1417 (void) npi_txdma_channel_enable(handle, channel); 1418 1419 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1420 } 1421 1422 void 1423 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1424 { 1425 npi_handle_t handle; 1426 1427 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1428 "==> nxge_txdma_disable_channel: channel %d", channel)); 1429 1430 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1431 /* stop the transmit dma channels */ 1432 (void) npi_txdma_channel_disable(handle, channel); 1433 1434 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1435 } 1436 1437 /* 1438 * nxge_txdma_stop_inj_err 1439 * 1440 * Stop a TDC. If at first we don't succeed, inject an error. 1441 * 1442 * Arguments: 1443 * nxgep 1444 * channel The channel to stop. 1445 * 1446 * Notes: 1447 * 1448 * NPI/NXGE function calls: 1449 * npi_txdma_channel_disable() 1450 * npi_txdma_inj_int_error_set() 1451 * #if defined(NXGE_DEBUG) 1452 * nxge_txdma_regs_dump_channels(nxgep); 1453 * #endif 1454 * 1455 * Registers accessed: 1456 * TX_CS DMC+0x40028 Transmit Control And Status 1457 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1458 * 1459 * Context: 1460 * Any domain 1461 */ 1462 int 1463 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1464 { 1465 npi_handle_t handle; 1466 tdmc_intr_dbg_t intr_dbg; 1467 int status; 1468 npi_status_t rs = NPI_SUCCESS; 1469 1470 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1471 /* 1472 * Stop the dma channel waits for the stop done. 1473 * If the stop done bit is not set, then create 1474 * an error. 1475 */ 1476 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1477 rs = npi_txdma_channel_disable(handle, channel); 1478 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1479 if (status == NXGE_OK) { 1480 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1481 "<== nxge_txdma_stop_inj_err (channel %d): " 1482 "stopped OK", channel)); 1483 return (status); 1484 } 1485 1486 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1487 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1488 "injecting error", channel, rs)); 1489 /* Inject any error */ 1490 intr_dbg.value = 0; 1491 intr_dbg.bits.ldw.nack_pref = 1; 1492 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1493 1494 /* Stop done bit will be set as a result of error injection */ 1495 rs = npi_txdma_channel_disable(handle, channel); 1496 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1497 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1498 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1499 "<== nxge_txdma_stop_inj_err (channel %d): " 1500 "stopped OK ", channel)); 1501 return (status); 1502 } 1503 1504 #if defined(NXGE_DEBUG) 1505 nxge_txdma_regs_dump_channels(nxgep); 1506 #endif 1507 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1508 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1509 " (injected error but still not stopped)", channel, rs)); 1510 1511 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1512 return (status); 1513 } 1514 1515 /*ARGSUSED*/ 1516 void 1517 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1518 { 1519 nxge_grp_set_t *set = &nxgep->tx_set; 1520 int tdc; 1521 1522 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1523 1524 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1525 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1526 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 1527 return; 1528 } 1529 1530 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1531 if ((1 << tdc) & set->owned.map) { 1532 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1533 if (ring) { 1534 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1535 "==> nxge_fixup_txdma_rings: channel %d", 1536 tdc)); 1537 nxge_txdma_fixup_channel(nxgep, ring, tdc); 1538 } 1539 } 1540 } 1541 1542 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1543 } 1544 1545 /*ARGSUSED*/ 1546 void 1547 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1548 { 1549 p_tx_ring_t ring_p; 1550 1551 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1552 ring_p = nxge_txdma_get_ring(nxgep, channel); 1553 if (ring_p == NULL) { 1554 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1555 return; 1556 } 1557 1558 if (ring_p->tdc != channel) { 1559 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1560 "<== nxge_txdma_fix_channel: channel not matched " 1561 "ring tdc %d passed channel", 1562 ring_p->tdc, channel)); 1563 return; 1564 } 1565 1566 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1567 1568 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1569 } 1570 1571 /*ARGSUSED*/ 1572 void 1573 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1574 { 1575 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1576 1577 if (ring_p == NULL) { 1578 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1579 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1580 return; 1581 } 1582 1583 if (ring_p->tdc != channel) { 1584 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1585 "<== nxge_txdma_fixup_channel: channel not matched " 1586 "ring tdc %d passed channel", 1587 ring_p->tdc, channel)); 1588 return; 1589 } 1590 1591 MUTEX_ENTER(&ring_p->lock); 1592 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1593 ring_p->rd_index = 0; 1594 ring_p->wr_index = 0; 1595 ring_p->ring_head.value = 0; 1596 ring_p->ring_kick_tail.value = 0; 1597 ring_p->descs_pending = 0; 1598 MUTEX_EXIT(&ring_p->lock); 1599 nxge_txdma_freemsg_task(ring_p); 1600 1601 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1602 } 1603 1604 /*ARGSUSED*/ 1605 void 1606 nxge_txdma_hw_kick(p_nxge_t nxgep) 1607 { 1608 nxge_grp_set_t *set = &nxgep->tx_set; 1609 int tdc; 1610 1611 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1612 1613 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1614 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1615 "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 1616 return; 1617 } 1618 1619 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1620 if ((1 << tdc) & set->owned.map) { 1621 
tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1622 if (ring) { 1623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1624 "==> nxge_txdma_hw_kick: channel %d", tdc)); 1625 nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1626 } 1627 } 1628 } 1629 1630 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1631 } 1632 1633 /*ARGSUSED*/ 1634 void 1635 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1636 { 1637 p_tx_ring_t ring_p; 1638 1639 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1640 1641 ring_p = nxge_txdma_get_ring(nxgep, channel); 1642 if (ring_p == NULL) { 1643 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1644 " nxge_txdma_kick_channel")); 1645 return; 1646 } 1647 1648 if (ring_p->tdc != channel) { 1649 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1650 "<== nxge_txdma_kick_channel: channel not matched " 1651 "ring tdc %d passed channel", 1652 ring_p->tdc, channel)); 1653 return; 1654 } 1655 1656 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1657 1658 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1659 } 1660 1661 /*ARGSUSED*/ 1662 void 1663 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1664 { 1665 1666 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1667 1668 if (ring_p == NULL) { 1669 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1670 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1671 return; 1672 } 1673 1674 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1675 } 1676 1677 /* 1678 * nxge_check_tx_hang 1679 * 1680 * Check the state of all TDCs belonging to nxgep. 1681 * 1682 * Arguments: 1683 * nxgep 1684 * 1685 * Notes: 1686 * Called by nxge_hw.c:nxge_check_hw_state(). 1687 * 1688 * NPI/NXGE function calls: 1689 * 1690 * Registers accessed: 1691 * 1692 * Context: 1693 * Any domain 1694 */ 1695 /*ARGSUSED*/ 1696 void 1697 nxge_check_tx_hang(p_nxge_t nxgep) 1698 { 1699 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1700 1701 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1702 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1703 goto nxge_check_tx_hang_exit; 1704 } 1705 1706 /* 1707 * Needs inputs from hardware for regs: 1708 * head index had not moved since last timeout. 1709 * packets not transmitted or stuffed registers. 1710 */ 1711 if (nxge_txdma_hung(nxgep)) { 1712 nxge_fixup_hung_txdma_rings(nxgep); 1713 } 1714 1715 nxge_check_tx_hang_exit: 1716 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1717 } 1718 1719 /* 1720 * nxge_txdma_hung 1721 * 1722 * Reset a TDC. 1723 * 1724 * Arguments: 1725 * nxgep 1726 * channel The channel to reset. 1727 * reg_data The current TX_CS. 1728 * 1729 * Notes: 1730 * Called by nxge_check_tx_hang() 1731 * 1732 * NPI/NXGE function calls: 1733 * nxge_txdma_channel_hung() 1734 * 1735 * Registers accessed: 1736 * 1737 * Context: 1738 * Any domain 1739 */ 1740 int 1741 nxge_txdma_hung(p_nxge_t nxgep) 1742 { 1743 nxge_grp_set_t *set = &nxgep->tx_set; 1744 int tdc; 1745 boolean_t shared; 1746 1747 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1748 1749 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1750 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1751 "<== nxge_txdma_hung: NULL ring pointer(s)")); 1752 return (B_FALSE); 1753 } 1754 1755 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1756 /* 1757 * Grab the shared state of the TDC. 
1758 */ 1759 if (isLDOMservice(nxgep)) { 1760 nxge_hio_data_t *nhd = 1761 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio; 1762 1763 MUTEX_ENTER(&nhd->lock); 1764 shared = nxgep->tdc_is_shared[tdc]; 1765 MUTEX_EXIT(&nhd->lock); 1766 } else { 1767 shared = B_FALSE; 1768 } 1769 1770 /* 1771 * Now, process continue to process. 1772 */ 1773 if (((1 << tdc) & set->owned.map) && !shared) { 1774 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1775 if (ring) { 1776 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 1777 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1778 "==> nxge_txdma_hung: TDC %d hung", 1779 tdc)); 1780 return (B_TRUE); 1781 } 1782 } 1783 } 1784 } 1785 1786 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1787 1788 return (B_FALSE); 1789 } 1790 1791 /* 1792 * nxge_txdma_channel_hung 1793 * 1794 * Reset a TDC. 1795 * 1796 * Arguments: 1797 * nxgep 1798 * ring <channel>'s ring. 1799 * channel The channel to reset. 1800 * 1801 * Notes: 1802 * Called by nxge_txdma.c:nxge_txdma_hung() 1803 * 1804 * NPI/NXGE function calls: 1805 * npi_txdma_ring_head_get() 1806 * 1807 * Registers accessed: 1808 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1809 * 1810 * Context: 1811 * Any domain 1812 */ 1813 int 1814 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1815 { 1816 uint16_t head_index, tail_index; 1817 boolean_t head_wrap, tail_wrap; 1818 npi_handle_t handle; 1819 tx_ring_hdl_t tx_head; 1820 uint_t tx_rd_index; 1821 1822 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1823 1824 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1825 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1826 "==> nxge_txdma_channel_hung: channel %d", channel)); 1827 MUTEX_ENTER(&tx_ring_p->lock); 1828 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1829 1830 tail_index = tx_ring_p->wr_index; 1831 tail_wrap = tx_ring_p->wr_index_wrap; 1832 tx_rd_index = tx_ring_p->rd_index; 1833 MUTEX_EXIT(&tx_ring_p->lock); 1834 nxge_txdma_freemsg_task(tx_ring_p); 1835 1836 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1837 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1838 "tail_index %d tail_wrap %d ", 1839 channel, tx_rd_index, tail_index, tail_wrap)); 1840 /* 1841 * Read the hardware maintained transmit head 1842 * and wrap around bit. 1843 */ 1844 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1845 head_index = tx_head.bits.ldw.head; 1846 head_wrap = tx_head.bits.ldw.wrap; 1847 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1848 "==> nxge_txdma_channel_hung: " 1849 "tx_rd_index %d tail %d tail_wrap %d " 1850 "head %d wrap %d", 1851 tx_rd_index, tail_index, tail_wrap, 1852 head_index, head_wrap)); 1853 1854 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1855 tail_index, tail_wrap) && 1856 (head_index == tx_rd_index)) { 1857 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1858 "==> nxge_txdma_channel_hung: EMPTY")); 1859 return (B_FALSE); 1860 } 1861 1862 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1863 "==> nxge_txdma_channel_hung: Checking if ring full")); 1864 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1865 tail_wrap)) { 1866 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1867 "==> nxge_txdma_channel_hung: full")); 1868 return (B_TRUE); 1869 } 1870 1871 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1872 1873 return (B_FALSE); 1874 } 1875 1876 /* 1877 * nxge_fixup_hung_txdma_rings 1878 * 1879 * Disable a TDC. 1880 * 1881 * Arguments: 1882 * nxgep 1883 * channel The channel to reset. 1884 * reg_data The current TX_CS. 
1885 * 1886 * Notes: 1887 * Called by nxge_check_tx_hang() 1888 * 1889 * NPI/NXGE function calls: 1890 * npi_txdma_ring_head_get() 1891 * 1892 * Registers accessed: 1893 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1894 * 1895 * Context: 1896 * Any domain 1897 */ 1898 /*ARGSUSED*/ 1899 void 1900 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1901 { 1902 nxge_grp_set_t *set = &nxgep->tx_set; 1903 int tdc; 1904 1905 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1906 1907 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1908 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1909 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1910 return; 1911 } 1912 1913 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1914 if ((1 << tdc) & set->owned.map) { 1915 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1916 if (ring) { 1917 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1918 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1919 "==> nxge_fixup_hung_txdma_rings: TDC %d", 1920 tdc)); 1921 } 1922 } 1923 } 1924 1925 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1926 } 1927 1928 /* 1929 * nxge_txdma_fixup_hung_channel 1930 * 1931 * 'Fix' a hung TDC. 1932 * 1933 * Arguments: 1934 * nxgep 1935 * channel The channel to fix. 1936 * 1937 * Notes: 1938 * Called by nxge_fixup_hung_txdma_rings() 1939 * 1940 * 1. Reclaim the TDC. 1941 * 2. Disable the TDC. 1942 * 1943 * NPI/NXGE function calls: 1944 * nxge_txdma_reclaim() 1945 * npi_txdma_channel_disable(TX_CS) 1946 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1947 * 1948 * Registers accessed: 1949 * TX_CS DMC+0x40028 Transmit Control And Status 1950 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1951 * 1952 * Context: 1953 * Any domain 1954 */ 1955 /*ARGSUSED*/ 1956 void 1957 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1958 { 1959 p_tx_ring_t ring_p; 1960 1961 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1962 ring_p = nxge_txdma_get_ring(nxgep, channel); 1963 if (ring_p == NULL) { 1964 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1965 "<== nxge_txdma_fix_hung_channel")); 1966 return; 1967 } 1968 1969 if (ring_p->tdc != channel) { 1970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1971 "<== nxge_txdma_fix_hung_channel: channel not matched " 1972 "ring tdc %d passed channel", 1973 ring_p->tdc, channel)); 1974 return; 1975 } 1976 1977 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1978 1979 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1980 } 1981 1982 /*ARGSUSED*/ 1983 void 1984 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1985 uint16_t channel) 1986 { 1987 npi_handle_t handle; 1988 tdmc_intr_dbg_t intr_dbg; 1989 int status = NXGE_OK; 1990 1991 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1992 1993 if (ring_p == NULL) { 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1995 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1996 return; 1997 } 1998 1999 if (ring_p->tdc != channel) { 2000 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2001 "<== nxge_txdma_fixup_hung_channel: channel " 2002 "not matched " 2003 "ring tdc %d passed channel", 2004 ring_p->tdc, channel)); 2005 return; 2006 } 2007 2008 /* Reclaim descriptors */ 2009 MUTEX_ENTER(&ring_p->lock); 2010 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 2011 MUTEX_EXIT(&ring_p->lock); 2012 2013 nxge_txdma_freemsg_task(ring_p); 2014 2015 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2016 /* 2017 * Stop the dma channel waits for the stop done. 2018 * If the stop done bit is not set, then force 2019 * an error. 
2020 */ 2021 status = npi_txdma_channel_disable(handle, channel); 2022 if (!(status & NPI_TXDMA_STOP_FAILED)) { 2023 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2024 "<== nxge_txdma_fixup_hung_channel: stopped OK " 2025 "ring tdc %d passed channel %d", 2026 ring_p->tdc, channel)); 2027 return; 2028 } 2029 2030 /* Inject any error */ 2031 intr_dbg.value = 0; 2032 intr_dbg.bits.ldw.nack_pref = 1; 2033 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 2034 2035 /* Stop done bit will be set as a result of error injection */ 2036 status = npi_txdma_channel_disable(handle, channel); 2037 if (!(status & NPI_TXDMA_STOP_FAILED)) { 2038 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2039 "<== nxge_txdma_fixup_hung_channel: stopped again" 2040 "ring tdc %d passed channel", 2041 ring_p->tdc, channel)); 2042 return; 2043 } 2044 2045 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2046 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 2047 "ring tdc %d passed channel", 2048 ring_p->tdc, channel)); 2049 2050 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 2051 } 2052 2053 /*ARGSUSED*/ 2054 void 2055 nxge_reclaim_rings(p_nxge_t nxgep) 2056 { 2057 nxge_grp_set_t *set = &nxgep->tx_set; 2058 int tdc; 2059 2060 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 2061 2062 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2063 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2064 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2065 return; 2066 } 2067 2068 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2069 if ((1 << tdc) & set->owned.map) { 2070 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2071 if (ring) { 2072 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2073 "==> nxge_reclaim_rings: TDC %d", tdc)); 2074 MUTEX_ENTER(&ring->lock); 2075 (void) nxge_txdma_reclaim(nxgep, ring, tdc); 2076 MUTEX_EXIT(&ring->lock); 2077 2078 nxge_txdma_freemsg_task(ring); 2079 } 2080 } 2081 } 2082 2083 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 2084 } 2085 2086 void 2087 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 2088 { 2089 nxge_grp_set_t *set = &nxgep->tx_set; 2090 npi_handle_t handle; 2091 int tdc; 2092 2093 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 2094 2095 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2096 2097 if (!isLDOMguest(nxgep)) { 2098 (void) npi_txdma_dump_fzc_regs(handle); 2099 2100 /* Dump TXC registers. */ 2101 (void) npi_txc_dump_fzc_regs(handle); 2102 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2103 } 2104 2105 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2106 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2107 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2108 return; 2109 } 2110 2111 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2112 if ((1 << tdc) & set->owned.map) { 2113 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2114 if (ring) { 2115 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2116 "==> nxge_txdma_regs_dump_channels: " 2117 "TDC %d", tdc)); 2118 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2119 2120 /* Dump TXC registers, if able to. 
*/ 2121 if (!isLDOMguest(nxgep)) { 2122 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2123 "==> nxge_txdma_regs_dump_channels:" 2124 " FZC TDC %d", tdc)); 2125 (void) npi_txc_dump_tdc_fzc_regs 2126 (handle, tdc); 2127 } 2128 nxge_txdma_regs_dump(nxgep, tdc); 2129 } 2130 } 2131 } 2132
2133 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 2134 } 2135
2136 void 2137 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 2138 { 2139 npi_handle_t handle; 2140 tx_ring_hdl_t hdl; 2141 tx_ring_kick_t kick; 2142 tx_cs_t cs; 2143 txc_control_t control; 2144 uint32_t bitmap = 0; 2145 uint32_t burst = 0; 2146 uint32_t bytes = 0; 2147 dma_log_page_t cfg; 2148
2149 printf("\n\tfunc # %d tdc %d ", 2150 nxgep->function_num, channel); 2151 cfg.page_num = 0; 2152 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2153 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2154 printf("\n\tlog page func %d valid page 0 %d", 2155 cfg.func_num, cfg.valid); 2156 cfg.page_num = 1; 2157 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2158 printf("\n\tlog page func %d valid page 1 %d", 2159 cfg.func_num, cfg.valid); 2160
2161 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 2162 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 2163 printf("\n\thead value is 0x%0llx", 2164 (long long)hdl.value); 2165 printf("\n\thead index %d", hdl.bits.ldw.head); 2166 printf("\n\tkick value is 0x%0llx", 2167 (long long)kick.value); 2168 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 2169
2170 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 2171 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 2172 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 2173
2174 (void) npi_txc_control(handle, OP_GET, &control); 2175 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 2176 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 2177 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 2178
2179 printf("\n\tTXC port control 0x%0llx", 2180 (long long)control.value); 2181 printf("\n\tTXC port bitmap 0x%x", bitmap); 2182 printf("\n\tTXC max burst %d", burst); 2183 printf("\n\tTXC bytes xmt %d\n", bytes); 2184
2185 { 2186 ipp_status_t status; 2187 2188 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 2189 #if defined(__i386) 2190 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value); 2191 #else 2192 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value); 2193 #endif 2194 } 2195 } 2196
2197 /* 2198 * nxge_tdc_hvio_setup 2199 * 2200 * I'm not exactly sure what this code does. 2201 * 2202 * Arguments: 2203 * nxgep 2204 * channel The channel to map. 2205 * 2206 * Notes: 2207 * 2208 * NPI/NXGE function calls: 2209 * na 2210 * 2211 * Context: 2212 * Service domain?
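 *
 * From the code, the function saves the original I/O base address and
 * length of the channel's transmit data buffers and of its control area
 * (descriptors and mailbox) into the ring's hv_* fields and clears
 * hv_set; these look like the inputs used later to program the NIU
 * logical pages through the hypervisor (the sun4v NIU_LP_WORKAROUND
 * path), though that is inferred from the field names rather than
 * stated anywhere in this file.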
2213 */ 2214 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2215 static void 2216 nxge_tdc_hvio_setup( 2217 nxge_t *nxgep, int channel) 2218 { 2219 nxge_dma_common_t *data; 2220 nxge_dma_common_t *control; 2221 tx_ring_t *ring; 2222 2223 ring = nxgep->tx_rings->rings[channel]; 2224 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2225 2226 ring->hv_set = B_FALSE; 2227 2228 ring->hv_tx_buf_base_ioaddr_pp = 2229 (uint64_t)data->orig_ioaddr_pp; 2230 ring->hv_tx_buf_ioaddr_size = 2231 (uint64_t)data->orig_alength; 2232 2233 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2234 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2235 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2236 ring->hv_tx_buf_base_ioaddr_pp, 2237 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2238 data->ioaddr_pp, data->orig_vatopa, 2239 data->orig_alength, data->orig_alength)); 2240 2241 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2242 2243 ring->hv_tx_cntl_base_ioaddr_pp = 2244 (uint64_t)control->orig_ioaddr_pp; 2245 ring->hv_tx_cntl_ioaddr_size = 2246 (uint64_t)control->orig_alength; 2247 2248 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2249 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2250 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2251 ring->hv_tx_cntl_base_ioaddr_pp, 2252 control->orig_ioaddr_pp, control->orig_vatopa, 2253 ring->hv_tx_cntl_ioaddr_size, 2254 control->orig_alength, control->orig_alength)); 2255 } 2256 #endif 2257 2258 static nxge_status_t 2259 nxge_map_txdma(p_nxge_t nxgep, int channel) 2260 { 2261 nxge_dma_common_t **pData; 2262 nxge_dma_common_t **pControl; 2263 tx_ring_t **pRing, *ring; 2264 tx_mbox_t **mailbox; 2265 uint32_t num_chunks; 2266 2267 nxge_status_t status = NXGE_OK; 2268 2269 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2270 2271 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2272 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2273 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2274 "<== nxge_map_txdma: buf not allocated")); 2275 return (NXGE_ERROR); 2276 } 2277 } 2278 2279 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2280 return (NXGE_ERROR); 2281 2282 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2283 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2284 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2285 pRing = &nxgep->tx_rings->rings[channel]; 2286 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2287 2288 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2289 "tx_rings $%p tx_desc_rings $%p", 2290 nxgep->tx_rings, nxgep->tx_rings->rings)); 2291 2292 /* 2293 * Map descriptors from the buffer pools for <channel>. 2294 */ 2295 2296 /* 2297 * Set up and prepare buffer blocks, descriptors 2298 * and mailbox. 
2299 */ 2300 status = nxge_map_txdma_channel(nxgep, channel, 2301 pData, pRing, num_chunks, pControl, mailbox); 2302 if (status != NXGE_OK) { 2303 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2304 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2305 "returned 0x%x", 2306 nxgep, channel, status)); 2307 return (status); 2308 } 2309 2310 ring = *pRing; 2311 2312 ring->index = (uint16_t)channel; 2313 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2314 2315 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2316 if (isLDOMguest(nxgep)) { 2317 (void) nxge_tdc_lp_conf(nxgep, channel); 2318 } else { 2319 nxge_tdc_hvio_setup(nxgep, channel); 2320 } 2321 #endif 2322 2323 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2324 "(status 0x%x channel %d)", status, channel)); 2325 2326 return (status); 2327 } 2328 2329 static nxge_status_t 2330 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2331 p_nxge_dma_common_t *dma_buf_p, 2332 p_tx_ring_t *tx_desc_p, 2333 uint32_t num_chunks, 2334 p_nxge_dma_common_t *dma_cntl_p, 2335 p_tx_mbox_t *tx_mbox_p) 2336 { 2337 int status = NXGE_OK; 2338 2339 /* 2340 * Set up and prepare buffer blocks, descriptors 2341 * and mailbox. 2342 */ 2343 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2344 "==> nxge_map_txdma_channel (channel %d)", channel)); 2345 /* 2346 * Transmit buffer blocks 2347 */ 2348 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2349 dma_buf_p, tx_desc_p, num_chunks); 2350 if (status != NXGE_OK) { 2351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2352 "==> nxge_map_txdma_channel (channel %d): " 2353 "map buffer failed 0x%x", channel, status)); 2354 goto nxge_map_txdma_channel_exit; 2355 } 2356 2357 /* 2358 * Transmit block ring, and mailbox. 2359 */ 2360 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2361 tx_mbox_p); 2362 2363 goto nxge_map_txdma_channel_exit; 2364 2365 nxge_map_txdma_channel_fail1: 2366 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2367 "==> nxge_map_txdma_channel: unmap buf" 2368 "(status 0x%x channel %d)", 2369 status, channel)); 2370 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2371 2372 nxge_map_txdma_channel_exit: 2373 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2374 "<== nxge_map_txdma_channel: " 2375 "(status 0x%x channel %d)", 2376 status, channel)); 2377 2378 return (status); 2379 } 2380 2381 /*ARGSUSED*/ 2382 static void 2383 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2384 { 2385 tx_ring_t *ring; 2386 tx_mbox_t *mailbox; 2387 2388 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2389 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2390 /* 2391 * unmap tx block ring, and mailbox. 2392 */ 2393 ring = nxgep->tx_rings->rings[channel]; 2394 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2395 2396 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2397 2398 /* unmap buffer blocks */ 2399 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2400 2401 nxge_free_txb(nxgep, channel); 2402 2403 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2404 } 2405 2406 /* 2407 * nxge_map_txdma_channel_cfg_ring 2408 * 2409 * Map a TDC into our kernel space. 2410 * This function allocates all of the per-channel data structures. 2411 * 2412 * Arguments: 2413 * nxgep 2414 * dma_channel The channel to map. 2415 * dma_cntl_p 2416 * tx_ring_p dma_channel's transmit ring 2417 * tx_mbox_p dma_channel's mailbox 2418 * 2419 * Notes: 2420 * 2421 * NPI/NXGE function calls: 2422 * nxge_setup_dma_common() 2423 * 2424 * Registers accessed: 2425 * none. 
2426 * 2427 * Context: 2428 * Any domain 2429 */ 2430 /*ARGSUSED*/ 2431 static void 2432 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2433 p_nxge_dma_common_t *dma_cntl_p, 2434 p_tx_ring_t tx_ring_p, 2435 p_tx_mbox_t *tx_mbox_p) 2436 { 2437 p_tx_mbox_t mboxp; 2438 p_nxge_dma_common_t cntl_dmap; 2439 p_nxge_dma_common_t dmap; 2440 p_tx_rng_cfig_t tx_ring_cfig_p; 2441 p_tx_ring_kick_t tx_ring_kick_p; 2442 p_tx_cs_t tx_cs_p; 2443 p_tx_dma_ent_msk_t tx_evmask_p; 2444 p_txdma_mbh_t mboxh_p; 2445 p_txdma_mbl_t mboxl_p; 2446 uint64_t tx_desc_len; 2447 2448 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2449 "==> nxge_map_txdma_channel_cfg_ring")); 2450 2451 cntl_dmap = *dma_cntl_p; 2452 2453 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2454 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2455 sizeof (tx_desc_t)); 2456 /* 2457 * Zero out transmit ring descriptors. 2458 */ 2459 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2460 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2461 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2462 tx_cs_p = &(tx_ring_p->tx_cs); 2463 tx_evmask_p = &(tx_ring_p->tx_evmask); 2464 tx_ring_cfig_p->value = 0; 2465 tx_ring_kick_p->value = 0; 2466 tx_cs_p->value = 0; 2467 tx_evmask_p->value = 0; 2468 2469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2470 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2471 dma_channel, 2472 dmap->dma_cookie.dmac_laddress)); 2473 2474 tx_ring_cfig_p->value = 0; 2475 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2476 tx_ring_cfig_p->value = 2477 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2478 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2479 2480 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2481 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2482 dma_channel, 2483 tx_ring_cfig_p->value)); 2484 2485 tx_cs_p->bits.ldw.rst = 1; 2486 2487 /* Map in mailbox */ 2488 mboxp = (p_tx_mbox_t) 2489 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2490 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2491 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2492 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2493 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2494 mboxh_p->value = mboxl_p->value = 0; 2495 2496 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2497 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2498 dmap->dma_cookie.dmac_laddress)); 2499 2500 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2501 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2502 2503 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2504 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2505 2506 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2507 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2508 dmap->dma_cookie.dmac_laddress)); 2509 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2510 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2511 "mbox $%p", 2512 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2513 tx_ring_p->page_valid.value = 0; 2514 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2515 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2516 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2517 tx_ring_p->page_hdl.value = 0; 2518 2519 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2520 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2521 2522 tx_ring_p->max_burst.value = 0; 2523 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2524 2525 *tx_mbox_p = mboxp; 2526 2527 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2528 "<== nxge_map_txdma_channel_cfg_ring")); 2529 } 2530 2531 
/*ARGSUSED*/ 2532 static void 2533 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2534 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2535 { 2536 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2537 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2538 tx_ring_p->tdc)); 2539 2540 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2541 2542 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2543 "<== nxge_unmap_txdma_channel_cfg_ring")); 2544 } 2545 2546 /* 2547 * nxge_map_txdma_channel_buf_ring 2548 * 2549 * 2550 * Arguments: 2551 * nxgep 2552 * channel The channel to map. 2553 * dma_buf_p 2554 * tx_desc_p channel's descriptor ring 2555 * num_chunks 2556 * 2557 * Notes: 2558 * 2559 * NPI/NXGE function calls: 2560 * nxge_setup_dma_common() 2561 * 2562 * Registers accessed: 2563 * none. 2564 * 2565 * Context: 2566 * Any domain 2567 */ 2568 static nxge_status_t 2569 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2570 p_nxge_dma_common_t *dma_buf_p, 2571 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2572 { 2573 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2574 p_nxge_dma_common_t dmap; 2575 nxge_os_dma_handle_t tx_buf_dma_handle; 2576 p_tx_ring_t tx_ring_p; 2577 p_tx_msg_t tx_msg_ring; 2578 nxge_status_t status = NXGE_OK; 2579 int ddi_status = DDI_SUCCESS; 2580 int i, j, index; 2581 uint32_t size, bsize; 2582 uint32_t nblocks, nmsgs; 2583 2584 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2585 "==> nxge_map_txdma_channel_buf_ring")); 2586 2587 dma_bufp = tmp_bufp = *dma_buf_p; 2588 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2589 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2590 "chunks bufp $%p", 2591 channel, num_chunks, dma_bufp)); 2592 2593 nmsgs = 0; 2594 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2595 nmsgs += tmp_bufp->nblocks; 2596 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2597 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2598 "bufp $%p nblocks %d nmsgs %d", 2599 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2600 } 2601 if (!nmsgs) { 2602 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2603 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2604 "no msg blocks", 2605 channel)); 2606 status = NXGE_ERROR; 2607 goto nxge_map_txdma_channel_buf_ring_exit; 2608 } 2609 2610 tx_ring_p = (p_tx_ring_t) 2611 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2612 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2613 (void *)nxgep->interrupt_cookie); 2614 MUTEX_INIT(&tx_ring_p->freelock, NULL, MUTEX_DRIVER, 2615 (void *)nxgep->interrupt_cookie); 2616 2617 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2618 tx_ring_p->tx_ring_busy = B_FALSE; 2619 tx_ring_p->nxgep = nxgep; 2620 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2621 nxge_serial_tx, tx_ring_p); 2622 /* 2623 * Allocate transmit message rings and handles for packets 2624 * not to be copied to premapped buffers. 
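 * (One tx_msg_t per descriptor slot, each with its own DDI DMA handle
 * allocated from nxge_tx_dma_attr; a handle is presumably bound only
 * when a packet is mapped directly rather than bcopy'd into a premapped
 * buffer.)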
2625 */ 2626 size = nmsgs * sizeof (tx_msg_t); 2627 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2628 for (i = 0; i < nmsgs; i++) { 2629 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2630 DDI_DMA_DONTWAIT, 0, 2631 &tx_msg_ring[i].dma_handle); 2632 if (ddi_status != DDI_SUCCESS) { 2633 status |= NXGE_DDI_FAILED; 2634 break; 2635 } 2636 } 2637 if (i < nmsgs) { 2638 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2639 "Allocate handles failed.")); 2640 goto nxge_map_txdma_channel_buf_ring_fail1; 2641 } 2642 2643 tx_ring_p->tdc = channel; 2644 tx_ring_p->tx_msg_ring = tx_msg_ring; 2645 tx_ring_p->tx_ring_size = nmsgs; 2646 tx_ring_p->num_chunks = num_chunks; 2647 if (!nxge_tx_intr_thres) { 2648 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2649 } 2650 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2651 tx_ring_p->rd_index = 0; 2652 tx_ring_p->wr_index = 0; 2653 tx_ring_p->ring_head.value = 0; 2654 tx_ring_p->ring_kick_tail.value = 0; 2655 tx_ring_p->descs_pending = 0; 2656 2657 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2658 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2659 "actual tx desc max %d nmsgs %d " 2660 "(config nxge_tx_ring_size %d)", 2661 channel, tx_ring_p->tx_ring_size, nmsgs, 2662 nxge_tx_ring_size)); 2663 2664 /* 2665 * Map in buffers from the buffer pool. 2666 */ 2667 index = 0; 2668 bsize = dma_bufp->block_size; 2669 2670 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2671 "dma_bufp $%p tx_rng_p $%p " 2672 "tx_msg_rng_p $%p bsize %d", 2673 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2674 2675 tx_buf_dma_handle = dma_bufp->dma_handle; 2676 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2677 bsize = dma_bufp->block_size; 2678 nblocks = dma_bufp->nblocks; 2679 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2680 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2681 "size %d dma_bufp $%p", 2682 i, sizeof (nxge_dma_common_t), dma_bufp)); 2683 2684 for (j = 0; j < nblocks; j++) { 2685 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2686 tx_msg_ring[index].nextp = NULL; 2687 dmap = &tx_msg_ring[index++].buf_dma; 2688 #ifdef TX_MEM_DEBUG 2689 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2690 "==> nxge_map_txdma_channel_buf_ring: j %d" 2691 "dmap $%p", i, dmap)); 2692 #endif 2693 nxge_setup_dma_common(dmap, dma_bufp, 1, 2694 bsize); 2695 } 2696 } 2697 2698 if (i < num_chunks) { 2699 status = NXGE_ERROR; 2700 goto nxge_map_txdma_channel_buf_ring_fail1; 2701 } 2702 2703 *tx_desc_p = tx_ring_p; 2704 2705 goto nxge_map_txdma_channel_buf_ring_exit; 2706 2707 nxge_map_txdma_channel_buf_ring_fail1: 2708 if (tx_ring_p->serial) { 2709 nxge_serialize_destroy(tx_ring_p->serial); 2710 tx_ring_p->serial = NULL; 2711 } 2712 2713 index--; 2714 for (; index >= 0; index--) { 2715 if (tx_msg_ring[index].dma_handle != NULL) { 2716 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2717 } 2718 } 2719 2720 MUTEX_DESTROY(&tx_ring_p->freelock); 2721 MUTEX_DESTROY(&tx_ring_p->lock); 2722 KMEM_FREE(tx_msg_ring, size); 2723 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2724 2725 status = NXGE_ERROR; 2726 2727 nxge_map_txdma_channel_buf_ring_exit: 2728 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2729 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2730 2731 return (status); 2732 } 2733 2734 /*ARGSUSED*/ 2735 static void 2736 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2737 { 2738 p_tx_msg_t tx_msg_ring; 2739 p_tx_msg_t tx_msg_p; 2740 int i; 2741 2742 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2743 "==> nxge_unmap_txdma_channel_buf_ring")); 2744 if (tx_ring_p == NULL) { 
2745 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2746 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 2747 return; 2748 } 2749 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2750 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2751 tx_ring_p->tdc)); 2752 2753 tx_msg_ring = tx_ring_p->tx_msg_ring; 2754 2755 /* 2756 * Since the serialization thread, timer thread and 2757 * interrupt thread can all call the transmit reclaim, 2758 * the unmapping function needs to acquire the lock 2759 * to free those buffers which were transmitted 2760 * by the hardware already. 2761 */ 2762 MUTEX_ENTER(&tx_ring_p->lock); 2763 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2764 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2765 "channel %d", 2766 tx_ring_p->tdc)); 2767 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2768 2769 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2770 tx_msg_p = &tx_msg_ring[i]; 2771 if (tx_msg_p->tx_message != NULL) { 2772 freemsg(tx_msg_p->tx_message); 2773 tx_msg_p->tx_message = NULL; 2774 } 2775 } 2776 2777 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2778 if (tx_msg_ring[i].dma_handle != NULL) { 2779 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2780 } 2781 tx_msg_ring[i].dma_handle = NULL; 2782 } 2783 2784 MUTEX_EXIT(&tx_ring_p->lock); 2785 2786 if (tx_ring_p->serial) { 2787 nxge_serialize_destroy(tx_ring_p->serial); 2788 tx_ring_p->serial = NULL; 2789 } 2790 2791 MUTEX_DESTROY(&tx_ring_p->freelock); 2792 MUTEX_DESTROY(&tx_ring_p->lock); 2793 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2794 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2795 2796 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2797 "<== nxge_unmap_txdma_channel_buf_ring")); 2798 } 2799 2800 static nxge_status_t 2801 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2802 { 2803 p_tx_rings_t tx_rings; 2804 p_tx_ring_t *tx_desc_rings; 2805 p_tx_mbox_areas_t tx_mbox_areas_p; 2806 p_tx_mbox_t *tx_mbox_p; 2807 nxge_status_t status = NXGE_OK; 2808 2809 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2810 2811 tx_rings = nxgep->tx_rings; 2812 if (tx_rings == NULL) { 2813 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2814 "<== nxge_txdma_hw_start: NULL ring pointer")); 2815 return (NXGE_ERROR); 2816 } 2817 tx_desc_rings = tx_rings->rings; 2818 if (tx_desc_rings == NULL) { 2819 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2820 "<== nxge_txdma_hw_start: NULL ring pointers")); 2821 return (NXGE_ERROR); 2822 } 2823 2824 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2825 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2826 2827 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2828 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2829 2830 status = nxge_txdma_start_channel(nxgep, channel, 2831 (p_tx_ring_t)tx_desc_rings[channel], 2832 (p_tx_mbox_t)tx_mbox_p[channel]); 2833 if (status != NXGE_OK) { 2834 goto nxge_txdma_hw_start_fail1; 2835 } 2836 2837 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2838 "tx_rings $%p rings $%p", 2839 nxgep->tx_rings, nxgep->tx_rings->rings)); 2840 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2841 "tx_rings $%p tx_desc_rings $%p", 2842 nxgep->tx_rings, tx_desc_rings)); 2843 2844 goto nxge_txdma_hw_start_exit; 2845 2846 nxge_txdma_hw_start_fail1: 2847 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2848 "==> nxge_txdma_hw_start: disable " 2849 "(status 0x%x channel %d)", status, channel)); 2850 2851 nxge_txdma_hw_start_exit: 2852 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2853 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2854 2855 return (status); 2856 } 2857 2858 /* 2859 * 
nxge_txdma_start_channel 2860 * 2861 * Start a TDC. 2862 * 2863 * Arguments: 2864 * nxgep 2865 * channel The channel to start. 2866 * tx_ring_p channel's transmit descriptor ring. 2867 * tx_mbox_p channel's mailbox. 2868 *
2869 * Notes: 2870 * 2871 * NPI/NXGE function calls: 2872 * nxge_reset_txdma_channel() 2873 * nxge_init_txdma_channel_event_mask() 2874 * nxge_enable_txdma_channel() 2875 * 2876 * Registers accessed: 2877 * none directly (see functions above). 2878 * 2879 * Context: 2880 * Any domain 2881 */
2882 static nxge_status_t 2883 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2884 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2885 2886 { 2887 nxge_status_t status = NXGE_OK; 2888
2889 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2890 "==> nxge_txdma_start_channel (channel %d)", channel)); 2891 /* 2892 * TXDMA/TXC must be in stopped state. 2893 */ 2894 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2895
2896 /* 2897 * Reset TXDMA channel 2898 */ 2899 tx_ring_p->tx_cs.value = 0; 2900 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2901 status = nxge_reset_txdma_channel(nxgep, channel, 2902 tx_ring_p->tx_cs.value); 2903 if (status != NXGE_OK) { 2904 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2905 "==> nxge_txdma_start_channel (channel %d)" 2906 " reset channel failed 0x%x", channel, status)); 2907 goto nxge_txdma_start_channel_exit; 2908 } 2909
2910 /* 2911 * Initialize the TXDMA channel specific FZC control 2912 * configurations. These FZC registers pertain 2913 * to each TX channel (i.e. logical pages). 2914 */ 2915 if (!isLDOMguest(nxgep)) { 2916 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2917 tx_ring_p, tx_mbox_p); 2918 if (status != NXGE_OK) { 2919 goto nxge_txdma_start_channel_exit; 2920 } 2921 } 2922
2923 /* 2924 * Initialize the event masks. 2925 */ 2926 tx_ring_p->tx_evmask.value = 0; 2927 status = nxge_init_txdma_channel_event_mask(nxgep, 2928 channel, &tx_ring_p->tx_evmask); 2929 if (status != NXGE_OK) { 2930 goto nxge_txdma_start_channel_exit; 2931 } 2932
2933 /* 2934 * Load TXDMA descriptors, buffers, mailbox, 2935 * initialise the DMA channels and 2936 * enable each DMA channel. 2937 */ 2938 status = nxge_enable_txdma_channel(nxgep, channel, 2939 tx_ring_p, tx_mbox_p); 2940 if (status != NXGE_OK) { 2941 goto nxge_txdma_start_channel_exit; 2942 } 2943
2944 nxge_txdma_start_channel_exit: 2945 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2946 2947 return (status); 2948 } 2949
2950 /* 2951 * nxge_txdma_stop_channel 2952 * 2953 * Stop a TDC. 2954 * 2955 * Arguments: 2956 * nxgep 2957 * channel The channel to stop. 2958 * tx_ring_p channel's transmit descriptor ring. 2959 * tx_mbox_p channel's mailbox. 2960 *
2961 * Notes: 2962 * 2963 * NPI/NXGE function calls: 2964 * nxge_txdma_stop_inj_err() 2965 * nxge_reset_txdma_channel() 2966 * nxge_init_txdma_channel_event_mask() 2967 * nxge_init_txdma_channel_cntl_stat() 2968 * nxge_disable_txdma_channel() 2969 * 2970 * Registers accessed: 2971 * none directly (see functions above). 2972 * 2973 * Context: 2974 * Any domain 2975 */
2976 /*ARGSUSED*/ 2977 static nxge_status_t 2978 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2979 { 2980 p_tx_ring_t tx_ring_p; 2981 int status = NXGE_OK; 2982
2983 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2984 "==> nxge_txdma_stop_channel: channel %d", channel)); 2985
2986 /* 2987 * Stop (disable) TXDMA and TXC. If the stop bit is set 2988 * and the STOP_N_GO bit is not set, the TXDMA reset state 2989 * will not be set when the TXDMA channel is reset.
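 * (nxge_txdma_stop_inj_err(), judging by its name and its use in
 * nxge_txdma_start_channel() above, performs the stop and, if needed,
 * the same error-injection trick used by
 * nxge_txdma_fixup_hung_channel().)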
2990 */ 2991 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2992 2993 tx_ring_p = nxgep->tx_rings->rings[channel]; 2994 2995 /* 2996 * Reset TXDMA channel 2997 */ 2998 tx_ring_p->tx_cs.value = 0; 2999 tx_ring_p->tx_cs.bits.ldw.rst = 1; 3000 status = nxge_reset_txdma_channel(nxgep, channel, 3001 tx_ring_p->tx_cs.value); 3002 if (status != NXGE_OK) { 3003 goto nxge_txdma_stop_channel_exit; 3004 } 3005 3006 #ifdef HARDWARE_REQUIRED 3007 /* Set up the interrupt event masks. */ 3008 tx_ring_p->tx_evmask.value = 0; 3009 status = nxge_init_txdma_channel_event_mask(nxgep, 3010 channel, &tx_ring_p->tx_evmask); 3011 if (status != NXGE_OK) { 3012 goto nxge_txdma_stop_channel_exit; 3013 } 3014 3015 /* Initialize the DMA control and status register */ 3016 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 3017 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 3018 tx_ring_p->tx_cs.value); 3019 if (status != NXGE_OK) { 3020 goto nxge_txdma_stop_channel_exit; 3021 } 3022 3023 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 3024 3025 /* Disable channel */ 3026 status = nxge_disable_txdma_channel(nxgep, channel, 3027 tx_ring_p, tx_mbox_p); 3028 if (status != NXGE_OK) { 3029 goto nxge_txdma_start_channel_exit; 3030 } 3031 3032 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3033 "==> nxge_txdma_stop_channel: event done")); 3034 3035 #endif 3036 3037 nxge_txdma_stop_channel_exit: 3038 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 3039 return (status); 3040 } 3041 3042 /* 3043 * nxge_txdma_get_ring 3044 * 3045 * Get the ring for a TDC. 3046 * 3047 * Arguments: 3048 * nxgep 3049 * channel 3050 * 3051 * Notes: 3052 * 3053 * NPI/NXGE function calls: 3054 * 3055 * Registers accessed: 3056 * 3057 * Context: 3058 * Any domain 3059 */ 3060 static p_tx_ring_t 3061 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 3062 { 3063 nxge_grp_set_t *set = &nxgep->tx_set; 3064 int tdc; 3065 3066 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 3067 3068 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3069 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3070 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 3071 goto return_null; 3072 } 3073 3074 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3075 if ((1 << tdc) & set->owned.map) { 3076 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3077 if (ring) { 3078 if (channel == ring->tdc) { 3079 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3080 "<== nxge_txdma_get_ring: " 3081 "tdc %d ring $%p", tdc, ring)); 3082 return (ring); 3083 } 3084 } 3085 } 3086 } 3087 3088 return_null: 3089 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3090 "ring not found")); 3091 3092 return (NULL); 3093 } 3094 3095 /* 3096 * nxge_txdma_get_mbox 3097 * 3098 * Get the mailbox for a TDC. 
3099 * 3100 * Arguments: 3101 * nxgep 3102 * channel 3103 * 3104 * Notes: 3105 * 3106 * NPI/NXGE function calls: 3107 * 3108 * Registers accessed: 3109 * 3110 * Context: 3111 * Any domain 3112 */ 3113 static p_tx_mbox_t 3114 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3115 { 3116 nxge_grp_set_t *set = &nxgep->tx_set; 3117 int tdc; 3118 3119 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3120 3121 if (nxgep->tx_mbox_areas_p == 0 || 3122 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3123 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3124 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3125 goto return_null; 3126 } 3127 3128 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3129 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3130 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3131 goto return_null; 3132 } 3133 3134 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3135 if ((1 << tdc) & set->owned.map) { 3136 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3137 if (ring) { 3138 if (channel == ring->tdc) { 3139 tx_mbox_t *mailbox = nxgep-> 3140 tx_mbox_areas_p-> 3141 txmbox_areas_p[tdc]; 3142 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3143 "<== nxge_txdma_get_mbox: tdc %d " 3144 "ring $%p", tdc, mailbox)); 3145 return (mailbox); 3146 } 3147 } 3148 } 3149 } 3150 3151 return_null: 3152 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3153 "mailbox not found")); 3154 3155 return (NULL); 3156 } 3157 3158 /* 3159 * nxge_tx_err_evnts 3160 * 3161 * Recover a TDC. 3162 * 3163 * Arguments: 3164 * nxgep 3165 * index The index to the TDC ring. 3166 * ldvp Used to get the channel number ONLY. 3167 * cs A copy of the bits from TX_CS. 3168 * 3169 * Notes: 3170 * Calling tree: 3171 * nxge_tx_intr() 3172 * 3173 * NPI/NXGE function calls: 3174 * npi_txdma_ring_error_get() 3175 * npi_txdma_inj_par_error_get() 3176 * nxge_txdma_fatal_err_recover() 3177 * 3178 * Registers accessed: 3179 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3180 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3181 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3182 * 3183 * Context: 3184 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
3185 */ 3186 /*ARGSUSED*/ 3187 static nxge_status_t 3188 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3189 { 3190 npi_handle_t handle; 3191 npi_status_t rs; 3192 uint8_t channel; 3193 p_tx_ring_t *tx_rings; 3194 p_tx_ring_t tx_ring_p; 3195 p_nxge_tx_ring_stats_t tdc_stats; 3196 boolean_t txchan_fatal = B_FALSE; 3197 nxge_status_t status = NXGE_OK; 3198 tdmc_inj_par_err_t par_err; 3199 uint32_t value; 3200 3201 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3202 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3203 channel = ldvp->channel; 3204 3205 tx_rings = nxgep->tx_rings->rings; 3206 tx_ring_p = tx_rings[index]; 3207 tdc_stats = tx_ring_p->tdc_stats; 3208 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3209 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3210 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3211 if ((rs = npi_txdma_ring_error_get(handle, channel, 3212 &tdc_stats->errlog)) != NPI_SUCCESS) 3213 return (NXGE_ERROR | rs); 3214 } 3215 3216 if (cs.bits.ldw.mbox_err) { 3217 tdc_stats->mbox_err++; 3218 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3219 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3221 "==> nxge_tx_err_evnts(channel %d): " 3222 "fatal error: mailbox", channel)); 3223 txchan_fatal = B_TRUE; 3224 } 3225 if (cs.bits.ldw.pkt_size_err) { 3226 tdc_stats->pkt_size_err++; 3227 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3228 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3230 "==> nxge_tx_err_evnts(channel %d): " 3231 "fatal error: pkt_size_err", channel)); 3232 txchan_fatal = B_TRUE; 3233 } 3234 if (cs.bits.ldw.tx_ring_oflow) { 3235 tdc_stats->tx_ring_oflow++; 3236 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3237 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3239 "==> nxge_tx_err_evnts(channel %d): " 3240 "fatal error: tx_ring_oflow", channel)); 3241 txchan_fatal = B_TRUE; 3242 } 3243 if (cs.bits.ldw.pref_buf_par_err) { 3244 tdc_stats->pre_buf_par_err++; 3245 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3246 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3247 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3248 "==> nxge_tx_err_evnts(channel %d): " 3249 "fatal error: pre_buf_par_err", channel)); 3250 /* Clear error injection source for parity error */ 3251 (void) npi_txdma_inj_par_error_get(handle, &value); 3252 par_err.value = value; 3253 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3254 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3255 txchan_fatal = B_TRUE; 3256 } 3257 if (cs.bits.ldw.nack_pref) { 3258 tdc_stats->nack_pref++; 3259 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3260 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3261 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3262 "==> nxge_tx_err_evnts(channel %d): " 3263 "fatal error: nack_pref", channel)); 3264 txchan_fatal = B_TRUE; 3265 } 3266 if (cs.bits.ldw.nack_pkt_rd) { 3267 tdc_stats->nack_pkt_rd++; 3268 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3269 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3270 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3271 "==> nxge_tx_err_evnts(channel %d): " 3272 "fatal error: nack_pkt_rd", channel)); 3273 txchan_fatal = B_TRUE; 3274 } 3275 if (cs.bits.ldw.conf_part_err) { 3276 tdc_stats->conf_part_err++; 3277 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3278 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3280 "==> 
nxge_tx_err_evnts(channel %d): " 3281 "fatal error: config_partition_err", channel)); 3282 txchan_fatal = B_TRUE; 3283 } 3284 if (cs.bits.ldw.pkt_prt_err) { 3285 tdc_stats->pkt_part_err++; 3286 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3287 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3288 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3289 "==> nxge_tx_err_evnts(channel %d): " 3290 "fatal error: pkt_prt_err", channel)); 3291 txchan_fatal = B_TRUE; 3292 } 3293 3294 /* Clear error injection source in case this is an injected error */ 3295 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3296 3297 if (txchan_fatal) { 3298 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3299 " nxge_tx_err_evnts: " 3300 " fatal error on channel %d cs 0x%llx\n", 3301 channel, cs.value)); 3302 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3303 tx_ring_p); 3304 if (status == NXGE_OK) { 3305 FM_SERVICE_RESTORED(nxgep); 3306 } 3307 } 3308 3309 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3310 3311 return (status); 3312 } 3313 3314 static nxge_status_t 3315 nxge_txdma_fatal_err_recover( 3316 p_nxge_t nxgep, 3317 uint16_t channel, 3318 p_tx_ring_t tx_ring_p) 3319 { 3320 npi_handle_t handle; 3321 npi_status_t rs = NPI_SUCCESS; 3322 p_tx_mbox_t tx_mbox_p; 3323 nxge_status_t status = NXGE_OK; 3324 3325 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3327 "Recovering from TxDMAChannel#%d error...", channel)); 3328 3329 /* 3330 * Stop the dma channel waits for the stop done. 3331 * If the stop done bit is not set, then create 3332 * an error. 3333 */ 3334 3335 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3336 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3337 MUTEX_ENTER(&tx_ring_p->lock); 3338 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3339 if (rs != NPI_SUCCESS) { 3340 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3341 "==> nxge_txdma_fatal_err_recover (channel %d): " 3342 "stop failed ", channel)); 3343 goto fail; 3344 } 3345 3346 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3347 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3348 3349 /* 3350 * Reset TXDMA channel 3351 */ 3352 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3353 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3354 NPI_SUCCESS) { 3355 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3356 "==> nxge_txdma_fatal_err_recover (channel %d)" 3357 " reset channel failed 0x%x", channel, rs)); 3358 goto fail; 3359 } 3360 3361 /* 3362 * Reset the tail (kick) register to 0. 3363 * (Hardware will not reset it. Tx overflow fatal 3364 * error if tail is not set to 0 after reset! 3365 */ 3366 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3367 3368 /* Restart TXDMA channel */ 3369 3370 if (!isLDOMguest(nxgep)) { 3371 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3372 3373 // XXX This is a problem in HIO! 3374 /* 3375 * Initialize the TXDMA channel specific FZC control 3376 * configurations. These FZC registers are pertaining 3377 * to each TX channel (i.e. logical pages). 3378 */ 3379 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3380 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3381 tx_ring_p, tx_mbox_p); 3382 if (status != NXGE_OK) 3383 goto fail; 3384 } 3385 3386 /* 3387 * Initialize the event masks. 
3388 */ 3389 tx_ring_p->tx_evmask.value = 0; 3390 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3391 &tx_ring_p->tx_evmask); 3392 if (status != NXGE_OK) 3393 goto fail; 3394 3395 tx_ring_p->wr_index_wrap = B_FALSE; 3396 tx_ring_p->wr_index = 0; 3397 tx_ring_p->rd_index = 0; 3398 3399 /* 3400 * Load TXDMA descriptors, buffers, mailbox, 3401 * initialise the DMA channels and 3402 * enable each DMA channel. 3403 */ 3404 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3405 status = nxge_enable_txdma_channel(nxgep, channel, 3406 tx_ring_p, tx_mbox_p); 3407 MUTEX_EXIT(&tx_ring_p->lock); 3408 if (status != NXGE_OK) 3409 goto fail; 3410 3411 nxge_txdma_freemsg_task(tx_ring_p); 3412 3413 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3414 "Recovery Successful, TxDMAChannel#%d Restored", 3415 channel)); 3416 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3417 3418 return (NXGE_OK); 3419 3420 fail: 3421 MUTEX_EXIT(&tx_ring_p->lock); 3422 3423 nxge_txdma_freemsg_task(tx_ring_p); 3424 3425 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3426 "nxge_txdma_fatal_err_recover (channel %d): " 3427 "failed to recover this txdma channel", channel)); 3428 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3429 3430 return (status); 3431 } 3432 3433 /* 3434 * nxge_tx_port_fatal_err_recover 3435 * 3436 * Attempt to recover from a fatal port error. 3437 * 3438 * Arguments: 3439 * nxgep 3440 * 3441 * Notes: 3442 * How would a guest do this? 3443 * 3444 * NPI/NXGE function calls: 3445 * 3446 * Registers accessed: 3447 * 3448 * Context: 3449 * Service domain 3450 */ 3451 nxge_status_t 3452 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3453 { 3454 nxge_grp_set_t *set = &nxgep->tx_set; 3455 nxge_channel_t tdc; 3456 3457 tx_ring_t *ring; 3458 tx_mbox_t *mailbox; 3459 3460 npi_handle_t handle; 3461 nxge_status_t status; 3462 npi_status_t rs; 3463 3464 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3466 "Recovering from TxPort error...")); 3467 3468 if (isLDOMguest(nxgep)) { 3469 return (NXGE_OK); 3470 } 3471 3472 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3473 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3474 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3475 return (NXGE_ERROR); 3476 } 3477 3478 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3479 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3480 "<== nxge_tx_port_fatal_err_recover: " 3481 "NULL ring pointer(s)")); 3482 return (NXGE_ERROR); 3483 } 3484 3485 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3486 if ((1 << tdc) & set->owned.map) { 3487 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3488 if (ring) 3489 MUTEX_ENTER(&ring->lock); 3490 } 3491 } 3492 3493 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3494 3495 /* 3496 * Stop all the TDCs owned by us. 3497 * (The shared TDCs will have been stopped by their owners.) 
3498 */ 3499 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3500 if ((1 << tdc) & set->owned.map) { 3501 ring = nxgep->tx_rings->rings[tdc]; 3502 if (ring) { 3503 rs = npi_txdma_channel_control 3504 (handle, TXDMA_STOP, tdc); 3505 if (rs != NPI_SUCCESS) { 3506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3507 "nxge_tx_port_fatal_err_recover " 3508 "(channel %d): stop failed ", tdc)); 3509 goto fail; 3510 } 3511 } 3512 } 3513 } 3514 3515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3516 3517 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3518 if ((1 << tdc) & set->owned.map) { 3519 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3520 if (ring) { 3521 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3522 nxge_txdma_freemsg_task(ring); 3523 } 3524 } 3525 } 3526 3527 /* 3528 * Reset all the TDCs. 3529 */ 3530 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3531 3532 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3533 if ((1 << tdc) & set->owned.map) { 3534 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3535 if (ring) { 3536 if ((rs = npi_txdma_channel_control 3537 (handle, TXDMA_RESET, tdc)) 3538 != NPI_SUCCESS) { 3539 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3540 "nxge_tx_port_fatal_err_recover " 3541 "(channel %d) reset channel " 3542 "failed 0x%x", tdc, rs)); 3543 goto fail; 3544 } 3545 } 3546 /* 3547 * Reset the tail (kick) register to 0. 3548 * (Hardware will not reset it. Tx overflow fatal 3549 * error if tail is not set to 0 after reset! 3550 */ 3551 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3552 } 3553 } 3554 3555 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3556 3557 /* Restart all the TDCs */ 3558 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3559 if ((1 << tdc) & set->owned.map) { 3560 ring = nxgep->tx_rings->rings[tdc]; 3561 if (ring) { 3562 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3563 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3564 ring, mailbox); 3565 ring->tx_evmask.value = 0; 3566 /* 3567 * Initialize the event masks. 3568 */ 3569 status = nxge_init_txdma_channel_event_mask 3570 (nxgep, tdc, &ring->tx_evmask); 3571 3572 ring->wr_index_wrap = B_FALSE; 3573 ring->wr_index = 0; 3574 ring->rd_index = 0; 3575 3576 if (status != NXGE_OK) 3577 goto fail; 3578 if (status != NXGE_OK) 3579 goto fail; 3580 } 3581 } 3582 } 3583 3584 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3585 3586 /* Re-enable all the TDCs */ 3587 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3588 if ((1 << tdc) & set->owned.map) { 3589 ring = nxgep->tx_rings->rings[tdc]; 3590 if (ring) { 3591 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3592 status = nxge_enable_txdma_channel(nxgep, tdc, 3593 ring, mailbox); 3594 if (status != NXGE_OK) 3595 goto fail; 3596 } 3597 } 3598 } 3599 3600 /* 3601 * Unlock all the TDCs. 
3602 */ 3603 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3604 if ((1 << tdc) & set->owned.map) { 3605 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3606 if (ring) 3607 MUTEX_EXIT(&ring->lock); 3608 } 3609 } 3610 3611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3612 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3613 3614 return (NXGE_OK); 3615 3616 fail: 3617 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3618 if ((1 << tdc) & set->owned.map) { 3619 ring = nxgep->tx_rings->rings[tdc]; 3620 if (ring) 3621 MUTEX_EXIT(&ring->lock); 3622 } 3623 } 3624 3625 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3626 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3627 3628 return (status); 3629 } 3630 3631 /* 3632 * nxge_txdma_inject_err 3633 * 3634 * Inject an error into a TDC. 3635 * 3636 * Arguments: 3637 * nxgep 3638 * err_id The error to inject. 3639 * chan The channel to inject into. 3640 * 3641 * Notes: 3642 * This is called from nxge_main.c:nxge_err_inject() 3643 * Has this ioctl ever been used? 3644 * 3645 * NPI/NXGE function calls: 3646 * npi_txdma_inj_par_error_get() 3647 * npi_txdma_inj_par_error_set() 3648 * 3649 * Registers accessed: 3650 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3651 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3652 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3653 * 3654 * Context: 3655 * Service domain 3656 */ 3657 void 3658 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3659 { 3660 tdmc_intr_dbg_t tdi; 3661 tdmc_inj_par_err_t par_err; 3662 uint32_t value; 3663 npi_handle_t handle; 3664 3665 switch (err_id) { 3666 3667 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3668 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3669 /* Clear error injection source for parity error */ 3670 (void) npi_txdma_inj_par_error_get(handle, &value); 3671 par_err.value = value; 3672 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3673 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3674 3675 par_err.bits.ldw.inject_parity_error = (1 << chan); 3676 (void) npi_txdma_inj_par_error_get(handle, &value); 3677 par_err.value = value; 3678 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3679 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3680 (unsigned long long)par_err.value); 3681 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3682 break; 3683 3684 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3685 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3686 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3687 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3688 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3689 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3690 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3691 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3692 chan, &tdi.value); 3693 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3694 tdi.bits.ldw.pref_buf_par_err = 1; 3695 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3696 tdi.bits.ldw.mbox_err = 1; 3697 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3698 tdi.bits.ldw.nack_pref = 1; 3699 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3700 tdi.bits.ldw.nack_pkt_rd = 1; 3701 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3702 tdi.bits.ldw.pkt_size_err = 1; 3703 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3704 tdi.bits.ldw.tx_ring_oflow = 1; 3705 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3706 tdi.bits.ldw.conf_part_err = 1; 3707 else if (err_id == 
NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3708 tdi.bits.ldw.pkt_part_err = 1; 3709 #if defined(__i386) 3710 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3711 tdi.value); 3712 #else 3713 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3714 tdi.value); 3715 #endif 3716 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3717 chan, tdi.value); 3718 3719 break; 3720 } 3721 } 3722
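/*
 * Note on the injection paths in nxge_txdma_inject_err() above: the
 * parity case sets the channel's bit in TDMC_INJ_PAR_ERR, while the
 * other cases set the corresponding bit in TDMC_INTR_DBG.  In either
 * case the fault eventually shows up in TX_CS and is handled by
 * nxge_tx_err_evnts(), which also clears the injection source (it
 * zeroes TDMC_INTR_DBG and, for a parity error, clears the channel's
 * TDMC_INJ_PAR_ERR bit) so that an injected error does not recur.
 */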