/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 64;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern void nxge_tx_ring_task(void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t *,
    uint32_t, p_nxge_dma_common_t *,
    p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_tx_ring_t,
    p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
    p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
    p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
    p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
    uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
    p_tx_ring_t ring_p, uint16_t channel);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int i, tdc, count;
    nxge_grp_t *group;
    dc_map_t map;
    int dev_gindex;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
            map = nxgep->pt_config.tdc_grps[dev_gindex].map;
            for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
                if ((1 << tdc) & map) {
                    if ((nxge_grp_dc_add(nxgep,
                        group, VP_BOUND_TX, tdc)))
                        goto init_txdma_channels_exit;
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
    return (NXGE_OK);

init_txdma_channels_exit:
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
            map = nxgep->pt_config.tdc_grps[dev_gindex].map;
            for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
                if ((1 << tdc) & map) {
                    nxge_grp_dc_remove(nxgep,
                        VP_BOUND_TX, tdc);
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    return (NXGE_ERROR);
}

nxge_status_t
nxge_init_txdma_channel(p_nxge_t nxge, int channel)
{
    nxge_status_t status;

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

    status = nxge_map_txdma(nxge, channel);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "<== nxge_init_txdma_channel: status 0x%x", status));
        (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
        return (status);
    }

    status = nxge_txdma_hw_start(nxge, channel);
    if (status != NXGE_OK) {
        (void) nxge_unmap_txdma_channel(nxge, channel);
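        /* Dump the TDC registers to help diagnose the failed start. */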
        (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
        return (status);
    }

    if (!nxge->statsp->tdc_ksp[channel])
        nxge_setup_tdc_kstats(nxge, channel);

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

    return (status);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "nxge_uninit_txdma_channels: no channels"));
        return;
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

    if (nxgep->statsp->tdc_ksp[channel]) {
        kstat_delete(nxgep->statsp->tdc_ksp[channel]);
        nxgep->statsp->tdc_ksp[channel] = 0;
    }

    (void) nxge_txdma_stop_channel(nxgep, channel);
    nxge_unmap_txdma_channel(nxgep, channel);

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "<== nxge_uninit_txdma_channel"));
}

/*
 * Describe a sub-region of <entries> blocks of <size> bytes, carved out
 * of *src_p, in *dest_p; then advance *src_p past the carved region.
 */
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
    size_t tsize;
    *dest_p = *src_p;
    tsize = size * entries;
    dest_p->alength = tsize;
    dest_p->nblocks = entries;
    dest_p->block_size = size;
    dest_p->offset += tsize;

    src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
    src_p->alength -= tsize;
    src_p->dma_cookie.dmac_laddress += tsize;
    src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *	Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
        rs = npi_txdma_channel_reset(handle, channel);
    } else {
        rs = npi_txdma_channel_control(handle, TXDMA_RESET,
            channel);
    }

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    /*
     * Reset the tail (kick) register to 0.
     * (Hardware will not reset it. A Tx overflow fatal
     * error results if the tail is not set to 0 after reset.)
     */
    TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
    return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *	Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 * 	mask_p		The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "<== nxge_init_txdma_channel_event_mask"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Write a TDC's transmit control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to set up.
 * 	reg_data	The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "<== nxge_init_txdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_control_status(handle, OP_SET, channel,
        (p_tx_cs_t)&reg_data);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *	Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the transmit ring configurations.
     */
    rs = npi_txdma_ring_config(handle, OP_SET, channel,
        (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    if (isLDOMguest(nxgep)) {
        /* Add interrupt handler for this channel. */
        if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
            return (NXGE_ERROR);
    }

    /* Write to hardware the mailbox */
    rs = npi_txdma_mbox_config(handle, OP_SET, channel,
        (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /* Start the DMA engine. */
    rs = npi_txdma_channel_init_enable(handle, channel);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

    return (status);
}

void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
    boolean_t l4_cksum, int pkt_len, uint8_t npads,
    p_tx_pkt_hdr_all_t pkthdrp,
    t_uscalar_t start_offset,
    t_uscalar_t stuff_offset)
{
    p_tx_pkt_header_t hdrp;
    p_mblk_t nmp;
    uint64_t tmp;
    size_t mblk_len;
    size_t iph_len;
    size_t hdrs_size;
    uint8_t hdrs_buf[sizeof (struct ether_header) +
        64 + sizeof (uint32_t)];
    uint8_t *cursor;
    uint8_t *ip_buf;
    uint16_t eth_type;
    uint8_t ipproto;
    boolean_t is_vlan = B_FALSE;
    size_t eth_hdr_size;

    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

    /*
     * Caller should zero out the headers first.
     */
    hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

    if (fill_len) {
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: pkt_len %d " "npads %d",
            pkt_len, npads));
        tmp = (uint64_t)pkt_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
        goto fill_tx_header_done;
    }

    hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

    /*
     * mp is the original data packet (does not include the
     * Neptune transmit header).
     */
    nmp = mp;
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
        "mp $%p b_rptr $%p len %d", mp, nmp->b_rptr, MBLKL(nmp)));
    /* copy ether_header from mblk to hdrs_buf */
    cursor = &hdrs_buf[0];
    tmp = sizeof (struct ether_vlan_header);
    while ((nmp != NULL) && (tmp > 0)) {
        size_t buflen;
        mblk_len = MBLKL(nmp);
        buflen = min((size_t)tmp, mblk_len);
        bcopy(nmp->b_rptr, cursor, buflen);
        cursor += buflen;
        tmp -= buflen;
        nmp = nmp->b_cont;
    }

    nmp = mp;
    mblk_len = MBLKL(nmp);
    ip_buf = NULL;
    eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
    NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
        "ether type 0x%x", eth_type, hdrp->value));

    if (eth_type < ETHERMTU) {
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
            "value 0x%llx", hdrp->value));
        if (*(hdrs_buf + sizeof (struct ether_header))
            == LLC_SNAP_SAP) {
            eth_type = ntohs(*((uint16_t *)(hdrs_buf +
                sizeof (struct ether_header) + 6)));
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
                eth_type));
        } else {
            goto fill_tx_header_done;
        }
    } else if (eth_type == VLAN_ETHERTYPE) {
        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

        eth_type = ntohs(((struct ether_vlan_header *)
            hdrs_buf)->ether_type);
        is_vlan = B_TRUE;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
            "value 0x%llx", hdrp->value));
    }

    if (!is_vlan) {
        eth_hdr_size = sizeof (struct ether_header);
    } else {
        eth_hdr_size = sizeof (struct ether_vlan_header);
    }

    switch (eth_type) {
    case ETHERTYPE_IP:
        if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
            ip_buf = nmp->b_rptr + eth_hdr_size;
            mblk_len -= eth_hdr_size;
            iph_len = ((*ip_buf) & 0x0f);
            if (mblk_len > (iph_len + sizeof (uint32_t))) {
                ip_buf = nmp->b_rptr;
                ip_buf += eth_hdr_size;
            } else {
                ip_buf = NULL;
            }

        }
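        /*
         * If the IP header could not be located within the first
         * mblk, reassemble the leading headers from the mblk chain
         * into hdrs_buf and locate it there.
         */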
        if (ip_buf == NULL) {
            hdrs_size = 0;
            ((p_ether_header_t)hdrs_buf)->ether_type = 0;
            while ((nmp) && (hdrs_size <
                sizeof (hdrs_buf))) {
                mblk_len = (size_t)nmp->b_wptr -
                    (size_t)nmp->b_rptr;
                if (mblk_len >=
                    (sizeof (hdrs_buf) - hdrs_size))
                    mblk_len = sizeof (hdrs_buf) -
                        hdrs_size;
                bcopy(nmp->b_rptr,
                    &hdrs_buf[hdrs_size], mblk_len);
                hdrs_size += mblk_len;
                nmp = nmp->b_cont;
            }
            ip_buf = hdrs_buf;
            ip_buf += eth_hdr_size;
            iph_len = ((*ip_buf) & 0x0f);
        }

        ipproto = ip_buf[9];

        tmp = (uint64_t)iph_len;
        hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
        tmp = (uint64_t)(eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
            "tmp 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto, tmp));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
            "value 0x%llx", hdrp->value));

        break;

    case ETHERTYPE_IPV6:
        hdrs_size = 0;
        ((p_ether_header_t)hdrs_buf)->ether_type = 0;
        while ((nmp) && (hdrs_size <
            sizeof (hdrs_buf))) {
            mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
            if (mblk_len >=
                (sizeof (hdrs_buf) - hdrs_size))
                mblk_len = sizeof (hdrs_buf) -
                    hdrs_size;
            bcopy(nmp->b_rptr,
                &hdrs_buf[hdrs_size], mblk_len);
            hdrs_size += mblk_len;
            nmp = nmp->b_cont;
        }
        ip_buf = hdrs_buf;
        ip_buf += eth_hdr_size;

        tmp = 1ull;
        hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

        tmp = (eth_hdr_size >> 1);
        hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

        /* byte 6 is the next header protocol */
        ipproto = ip_buf[6];

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
            " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
            iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
            ipproto));
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
            "value 0x%llx", hdrp->value));

        break;

    default:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
        goto fill_tx_header_done;
    }

    switch (ipproto) {
    case IPPROTO_TCP:
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
        if (l4_cksum) {
            hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
            hdrp->value |=
                (((uint64_t)(start_offset >> 1)) <<
                TX_PKT_HEADER_L4START_SHIFT);
            hdrp->value |=
                (((uint64_t)(stuff_offset >> 1)) <<
                TX_PKT_HEADER_L4STUFF_SHIFT);

            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
                "value 0x%llx", hdrp->value));
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
            "value 0x%llx", hdrp->value));
        break;

    case IPPROTO_UDP:
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
        if (l4_cksum) {
            if (!nxge_cksum_offload) {
                uint16_t *up;
                uint16_t cksum;
                t_uscalar_t stuff_len;

                /*
                 * The checksum field has the
                 * partial checksum.
                 * IP_CSUM() macro calls ip_cksum() which
                 * can add in the partial checksum.
                 */
                cksum = IP_CSUM(mp, start_offset, 0);
                stuff_len = stuff_offset;
                nmp = mp;
                mblk_len = MBLKL(nmp);
                while ((nmp != NULL) &&
                    (mblk_len < stuff_len)) {
                    stuff_len -= mblk_len;
                    nmp = nmp->b_cont;
                }
                ASSERT(nmp);
                up = (uint16_t *)(nmp->b_rptr + stuff_len);

                *up = cksum;
                hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    "use sw cksum "
                    "write to $%p cksum 0x%x content up 0x%x",
                    stuff_len,
                    up,
                    cksum,
                    *up));
            } else {
                /* Hardware will compute the full checksum */
                hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
                hdrp->value |=
                    (((uint64_t)(start_offset >> 1)) <<
                    TX_PKT_HEADER_L4START_SHIFT);
                hdrp->value |=
                    (((uint64_t)(stuff_offset >> 1)) <<
                    TX_PKT_HEADER_L4STUFF_SHIFT);

                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
                    " use partial checksum "
                    "cksum 0x%x ",
                    "value 0x%llx",
                    stuff_offset,
                    IP_CSUM(mp, start_offset, 0),
                    hdrp->value));
            }
        }

        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> nxge_tx_pkt_hdr_init: UDP"
            "value 0x%llx", hdrp->value));
        break;

    default:
        goto fill_tx_header_done;
    }

fill_tx_header_done:
    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_fill_tx_hdr: pkt_len %d "
        "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

    NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
    p_mblk_t newmp = NULL;

    if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
        NXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== nxge_tx_pkt_header_reserve: allocb failed"));
        return (NULL);
    }

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_tx_pkt_header_reserve: get new mp"));
    DB_TYPE(newmp) = M_DATA;
    newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
    linkb(newmp, mp);
    newmp->b_rptr -= TX_PKT_HEADER_SIZE;

    NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
        "b_rptr $%p b_wptr $%p",
        newmp->b_rptr, newmp->b_wptr));

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "<== nxge_tx_pkt_header_reserve: use new mp"));

    return (newmp);
}

int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
    uint_t nmblks;
    ssize_t len;
    uint_t pkt_len;
    p_mblk_t nmp, bmp, tmp;
    uint8_t *b_wptr;

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
        "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

    nmp = mp;
    bmp = mp;
    nmblks = 0;
    pkt_len = 0;
    *tot_xfer_len_p = 0;

    while (nmp) {
        len = MBLKL(nmp);
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        if (len <= 0) {
            bmp = nmp;
            nmp = nmp->b_cont;
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len (0) pkt_len %d nmblks %d",
                pkt_len, nmblks));
            continue;
        }

        *tot_xfer_len_p += len;
        NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
            "len %d pkt_len %d nmblks %d tot_xfer_len %d",
            len, pkt_len, nmblks,
            *tot_xfer_len_p));

        if (len < nxge_bcopy_thresh) {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (< thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            if (pkt_len == 0)
                nmblks++;
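            /*
             * Accumulate this small fragment into the current
             * bcopy run; a new descriptor was counted when the
             * run started, and the run is closed out once it
             * reaches the bcopy threshold.
             */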
            pkt_len += len;
            if (pkt_len >= nxge_bcopy_thresh) {
                pkt_len = 0;
                len = 0;
                nmp = bmp;
            }
        } else {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: "
                "len %d (> thresh) pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            pkt_len = 0;
            nmblks++;
            /*
             * Hardware limits the transfer length to 4K.
             * If len is more than 4K, we need to break
             * it up to at most 2 more blocks.
             */
            if (len > TX_MAX_TRANSFER_LENGTH) {
                uint32_t nsegs;

                nsegs = 1;
                NXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> nxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d nsegs %d",
                    len, pkt_len, nmblks, nsegs));
                if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
                    ++nsegs;
                }
                do {
                    b_wptr = nmp->b_rptr +
                        TX_MAX_TRANSFER_LENGTH;
                    nmp->b_wptr = b_wptr;
                    if ((tmp = dupb(nmp)) == NULL) {
                        return (0);
                    }
                    tmp->b_rptr = b_wptr;
                    tmp->b_wptr = nmp->b_wptr;
                    tmp->b_cont = nmp->b_cont;
                    nmp->b_cont = tmp;
                    nmblks++;
                    if (--nsegs) {
                        nmp = tmp;
                    }
                } while (nsegs);
                nmp = tmp;
            }
        }

        /*
         * Hardware limits the transmit gather pointers to 15.
         */
        if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
            TX_MAX_GATHER_POINTERS) {
            NXGE_DEBUG_MSG((NULL, TX_CTL,
                "==> nxge_tx_pkt_nmblocks: pull msg - "
                "len %d pkt_len %d nmblks %d",
                len, pkt_len, nmblks));
            /* Pull all message blocks from b_cont */
            if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
                return (0);
            }
            freemsg(nmp->b_cont);
            nmp->b_cont = tmp;
            pkt_len = 0;
        }
        bmp = nmp;
        nmp = nmp->b_cont;
    }

    NXGE_DEBUG_MSG((NULL, TX_CTL,
        "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
        "nmblks %d len %d tot_xfer_len %d",
        mp->b_rptr, mp->b_wptr, nmblks,
        MBLKL(mp), *tot_xfer_len_p));

    return (nmblks);
}

boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
    boolean_t status = B_TRUE;
    p_nxge_dma_common_t tx_desc_dma_p;
    nxge_dma_common_t desc_area;
    p_tx_desc_t tx_desc_ring_vp;
    p_tx_desc_t tx_desc_p;
    p_tx_desc_t tx_desc_pp;
    tx_desc_t r_tx_desc;
    p_tx_msg_t tx_msg_ring;
    p_tx_msg_t tx_msg_p;
    npi_handle_t handle;
    tx_ring_hdl_t tx_head;
    uint32_t pkt_len;
    uint_t tx_rd_index;
    uint16_t head_index, tail_index;
    uint8_t tdc;
    boolean_t head_wrap, tail_wrap;
    p_nxge_tx_ring_stats_t tdc_stats;
    int rc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

    status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
        (nmblks != 0));
    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
        tx_ring_p->descs_pending, nxge_reclaim_pending,
        nmblks));
    if (!status) {
        tx_desc_dma_p = &tx_ring_p->tdc_desc;
        desc_area = tx_ring_p->tdc_desc;
        handle = NXGE_DEV_NPI_HANDLE(nxgep);
        tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
        tx_desc_ring_vp =
            (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
        tx_rd_index = tx_ring_p->rd_index;
        tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
        tx_msg_ring = tx_ring_p->tx_msg_ring;
        tx_msg_p = &tx_msg_ring[tx_rd_index];
        tdc = tx_ring_p->tdc;
        tdc_stats = tx_ring_p->tdc_stats;
        if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
            tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
        }

        tail_index = tx_ring_p->wr_index;
        tail_wrap = tx_ring_p->wr_index_wrap;

        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
            "tail_index %d tail_wrap %d "
            "tx_desc_p $%p ($%p) ",
            tdc, tx_rd_index, tail_index, tail_wrap,
            tx_desc_p, (*(uint64_t *)tx_desc_p)));
        /*
         * Read the hardware maintained transmit head
         * and wrap around bit.
         */
        TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
        head_index = tx_head.bits.ldw.head;
        head_wrap = tx_head.bits.ldw.wrap;
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: "
            "tx_rd_index %d tail %d tail_wrap %d "
            "head %d wrap %d",
            tx_rd_index, tail_index, tail_wrap,
            head_index, head_wrap));

        if (head_index == tail_index) {
            if (TXDMA_RING_EMPTY(head_index, head_wrap,
                tail_index, tail_wrap) &&
                (head_index == tx_rd_index)) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: EMPTY"));
                return (B_TRUE);
            }

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: Checking "
                "if ring full"));
            if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
                tail_wrap)) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: full"));
                return (B_FALSE);
            }
        }

        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

        tx_desc_pp = &r_tx_desc;
        while ((tx_rd_index != head_index) &&
            (tx_ring_p->descs_pending != 0)) {

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: Checking if pending"));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "descs_pending %d ",
                tx_ring_p->descs_pending));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "(tx_rd_index %d head_index %d "
                "(tx_desc_p $%p)",
                tx_rd_index, head_index,
                tx_desc_p));

            tx_desc_pp->value = tx_desc_p->value;
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: "
                "(tx_rd_index %d head_index %d "
                "tx_desc_p $%p (desc value 0x%llx) ",
                tx_rd_index, head_index,
                tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: dump desc:"));

            pkt_len = tx_desc_pp->bits.hdw.tr_len;
            tdc_stats->obytes += pkt_len;
            tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: pkt_len %d "
                "tdc channel %d opackets %d",
                pkt_len,
                tdc,
                tdc_stats->opackets));

            if (tx_msg_p->flags.dma_type == USE_DVMA) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "tx_desc_p = $%p "
                    "tx_desc_pp = $%p "
                    "index = %d",
                    tx_desc_p,
                    tx_desc_pp,
                    tx_ring_p->rd_index));
                (void) dvma_unload(tx_msg_p->dvma_handle,
                    0, -1);
                tx_msg_p->dvma_handle = NULL;
                if (tx_ring_p->dvma_wr_index ==
                    tx_ring_p->dvma_wrap_mask) {
                    tx_ring_p->dvma_wr_index = 0;
                } else {
                    tx_ring_p->dvma_wr_index++;
                }
                tx_ring_p->dvma_pending--;
            } else if (tx_msg_p->flags.dma_type ==
                USE_DMA) {
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_txdma_reclaim: "
                    "USE DMA"));
                if (rc = ddi_dma_unbind_handle
                    (tx_msg_p->dma_handle)) {
                    cmn_err(CE_WARN, "!nxge_reclaim: "
                        "ddi_dma_unbind_handle "
                        "failed. status %d", rc);
                }
            }
            NXGE_DEBUG_MSG((nxgep, TX_CTL,
                "==> nxge_txdma_reclaim: count packets"));
            /*
             * count a chained packet only once.
             */
            if (tx_msg_p->tx_message != NULL) {
                freemsg(tx_msg_p->tx_message);
                tx_msg_p->tx_message = NULL;
            }

            tx_msg_p->flags.dma_type = USE_NONE;
            tx_rd_index = tx_ring_p->rd_index;
            tx_rd_index = (tx_rd_index + 1) &
                tx_ring_p->tx_wrap_mask;
            tx_ring_p->rd_index = tx_rd_index;
            tx_ring_p->descs_pending--;
            tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
            tx_msg_p = &tx_msg_ring[tx_rd_index];
        }

        status = (nmblks <= (tx_ring_p->tx_ring_size -
            tx_ring_p->descs_pending -
            TX_FULL_MARK));
        if (status) {
            cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
        }
    } else {
        status = (nmblks <=
            (tx_ring_p->tx_ring_size -
            tx_ring_p->descs_pending -
            TX_FULL_MARK));
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "<== nxge_txdma_reclaim status = 0x%08x", status));

    return (status);
}

/*
 * nxge_tx_intr
 *
 *	Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
    p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
    p_nxge_t nxgep = (p_nxge_t)arg2;
    p_nxge_ldg_t ldgp;
    uint8_t channel;
    uint32_t vindex;
    npi_handle_t handle;
    tx_cs_t cs;
    p_tx_ring_t *tx_rings;
    p_tx_ring_t tx_ring_p;
    npi_status_t rs = NPI_SUCCESS;
    uint_t serviced = DDI_INTR_UNCLAIMED;
    nxge_status_t status = NXGE_OK;

    if (ldvp == NULL) {
        NXGE_DEBUG_MSG((NULL, INT_CTL,
            "<== nxge_tx_intr: nxgep $%p ldvp $%p",
            nxgep, ldvp));
        return (DDI_INTR_UNCLAIMED);
    }

    if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
        nxgep = ldvp->nxgep;
    }
    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
        nxgep, ldvp));

    if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
        (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_tx_intr: interface not started or initialized"));
        return (DDI_INTR_CLAIMED);
    }

    /*
     * This interrupt handler is for a specific
     * transmit dma channel.
     */
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /* Get the control and status for this channel. */
    channel = ldvp->channel;
    ldgp = ldvp->ldgp;
    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
        "channel %d",
        nxgep, ldvp, channel));

    rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
    vindex = ldvp->vdma_index;
    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
        channel, vindex, rs));
    if (!rs && cs.bits.ldw.mk) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_tx_intr:channel %d ring index %d "
            "status 0x%08x (mk bit set)",
            channel, vindex, rs));
        tx_rings = nxgep->tx_rings->rings;
        tx_ring_p = tx_rings[vindex];
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_tx_intr:channel %d ring index %d "
            "status 0x%08x (mk bit set, calling reclaim)",
            channel, vindex, rs));

        nxge_tx_ring_task((void *)tx_ring_p);
    }

    /*
     * Process other transmit control and status.
     * Check the ldv state.
     */
    status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
    /*
     * Rearm this logical group if this is a single device
     * group.
     */
    if (ldgp->nldvs == 1) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_tx_intr: rearm"));
        if (status == NXGE_OK) {
            if (isLDOMguest(nxgep)) {
                nxge_hio_ldgimgn(nxgep, ldgp);
            } else {
                (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
                    B_TRUE, ldgp->ldg_timer);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
    serviced = DDI_INTR_CLAIMED;
    return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

    (void) nxge_txdma_stop(nxgep);

    (void) nxge_fixup_txdma_rings(nxgep);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_tx_mac_enable(nxgep);
    (void) nxge_txdma_hw_kick(nxgep);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

npi_status_t
nxge_txdma_channel_disable(nxge_t *nxge, int channel)
{
    npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
    npi_status_t rs;
    tdmc_intr_dbg_t intr_dbg;

    /*
     * Stop the dma channel and wait for the stop-done.
     * If the stop-done bit is not present, then force
     * an error so TXC will stop.
     * All channels bound to this port need to be stopped
     * and reset after injecting an interrupt error.
     */
1259 */ 1260 rs = npi_txdma_channel_disable(handle, channel); 1261 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1262 "==> nxge_txdma_channel_disable(%d) " 1263 "rs 0x%x", channel, rs)); 1264 if (rs != NPI_SUCCESS) { 1265 /* Inject any error */ 1266 intr_dbg.value = 0; 1267 intr_dbg.bits.ldw.nack_pref = 1; 1268 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1269 "==> nxge_txdma_hw_mode: " 1270 "channel %d (stop failed 0x%x) " 1271 "(inject err)", rs, channel)); 1272 (void) npi_txdma_inj_int_error_set( 1273 handle, channel, &intr_dbg); 1274 rs = npi_txdma_channel_disable(handle, channel); 1275 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1276 "==> nxge_txdma_hw_mode: " 1277 "channel %d (stop again 0x%x) " 1278 "(after inject err)", 1279 rs, channel)); 1280 } 1281 1282 return (rs); 1283 } 1284 1285 /* 1286 * nxge_txdma_hw_mode 1287 * 1288 * Toggle all TDCs on (enable) or off (disable). 1289 * 1290 * Arguments: 1291 * nxgep 1292 * enable Enable or disable a TDC. 1293 * 1294 * Notes: 1295 * 1296 * NPI/NXGE function calls: 1297 * npi_txdma_channel_enable(TX_CS) 1298 * npi_txdma_channel_disable(TX_CS) 1299 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1300 * 1301 * Registers accessed: 1302 * TX_CS DMC+0x40028 Transmit Control And Status 1303 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1304 * 1305 * Context: 1306 * Any domain 1307 */ 1308 nxge_status_t 1309 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1310 { 1311 nxge_grp_set_t *set = &nxgep->tx_set; 1312 1313 npi_handle_t handle; 1314 nxge_status_t status; 1315 npi_status_t rs; 1316 int tdc; 1317 1318 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1319 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1320 1321 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1322 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1323 "<== nxge_txdma_mode: not initialized")); 1324 return (NXGE_ERROR); 1325 } 1326 1327 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1328 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1329 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1330 return (NXGE_ERROR); 1331 } 1332 1333 /* Enable or disable all of the TDCs owned by us. */ 1334 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1335 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1336 if ((1 << tdc) & set->owned.map) { 1337 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1338 if (ring) { 1339 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1340 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1341 if (enable) { 1342 rs = npi_txdma_channel_enable 1343 (handle, tdc); 1344 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1345 "==> nxge_txdma_hw_mode: " 1346 "channel %d (enable) rs 0x%x", 1347 tdc, rs)); 1348 } else { 1349 rs = nxge_txdma_channel_disable 1350 (nxgep, tdc); 1351 } 1352 } 1353 } 1354 } 1355 1356 status = ((rs == NPI_SUCCESS) ? 

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "<== nxge_txdma_hw_mode: status 0x%x", status));

    return (status);
}

void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_txdma_enable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /* enable the transmit dma channels */
    (void) npi_txdma_channel_enable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_txdma_disable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /* stop the transmit dma channels */
    (void) npi_txdma_channel_disable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

/*
 * nxge_txdma_stop_inj_err
 *
 *	Stop a TDC. If at first we don't succeed, inject an error.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_disable()
 *	npi_txdma_inj_int_error_set()
 * #if defined(NXGE_DEBUG)
 *	nxge_txdma_regs_dump_channels(nxgep);
 * #endif
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
    npi_handle_t handle;
    tdmc_intr_dbg_t intr_dbg;
    int status;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
    /*
     * Stop the DMA channel and wait for the stop-done bit.
     * If the stop-done bit is not set, then create
     * an error.
     */
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_channel_disable(handle, channel);
    status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
    if (status == NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_stop_inj_err (channel %d): "
            "stopped OK", channel));
        return (status);
    }

    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
        "injecting error", channel, rs));
    /* Inject any error */
    intr_dbg.value = 0;
    intr_dbg.bits.ldw.nack_pref = 1;
    (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

    /* Stop done bit will be set as a result of error injection */
    rs = npi_txdma_channel_disable(handle, channel);
    status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
    if (!(rs & NPI_TXDMA_STOP_FAILED)) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_stop_inj_err (channel %d): "
            "stopped OK ", channel));
        return (status);
    }

#if defined(NXGE_DEBUG)
    nxge_txdma_regs_dump_channels(nxgep);
#endif
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
        " (injected error but still not stopped)", channel, rs));

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
    return (status);
}

/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

    if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
        return;
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
            if (ring) {
                NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
                    "==> nxge_fixup_txdma_rings: channel %d",
                    tdc));
                nxge_txdma_fixup_channel(nxgep, ring, tdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
    p_tx_ring_t ring_p;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
    ring_p = nxge_txdma_get_ring(nxgep, channel);
    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
        return;
    }

    if (ring_p->tdc != channel) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fix_channel: channel not matched "
            "ring tdc %d passed channel",
            ring_p->tdc, channel));
        return;
    }

    nxge_txdma_fixup_channel(nxgep, ring_p, channel);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fixup_channel: NULL ring pointer"));
        return;
    }

    if (ring_p->tdc != channel) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fixup_channel: channel not matched "
            "ring tdc %d passed channel",
            ring_p->tdc, channel));
        return;
    }

    MUTEX_ENTER(&ring_p->lock);
    (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
    ring_p->rd_index = 0;
    ring_p->wr_index = 0;
    ring_p->ring_head.value = 0;
    ring_p->ring_kick_tail.value = 0;
    ring_p->descs_pending = 0;
    MUTEX_EXIT(&ring_p->lock);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

    if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
        return;
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
            if (ring) {
                NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
                    "==> nxge_txdma_hw_kick: channel %d", tdc));
                nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
    p_tx_ring_t ring_p;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

    ring_p = nxge_txdma_get_ring(nxgep, channel);
    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            " nxge_txdma_kick_channel"));
        return;
    }

    if (ring_p->tdc != channel) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_kick_channel: channel not matched "
            "ring tdc %d passed channel",
            ring_p->tdc, channel));
        return;
    }

    nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*
 * nxge_check_tx_hang
 *
 *	Check the state of all TDCs belonging to nxgep.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_hw.c:nxge_check_hw_state().
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

    if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
        (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
        goto nxge_check_tx_hang_exit;
    }

    /*
     * Needs inputs from hardware for regs:
     *	head index had not moved since last timeout.
     *	packets not transmitted or stuffed registers.
     */
    if (nxge_txdma_hung(nxgep)) {
        nxge_fixup_hung_txdma_rings(nxgep);
    }

nxge_check_tx_hang_exit:
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 *	Determine whether any TDC owned by this instance is hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;
    boolean_t shared;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

    if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_hung: NULL ring pointer(s)"));
        return (B_FALSE);
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        /*
         * Grab the shared state of the TDC.
         */
1714 */ 1715 if (isLDOMservice(nxgep)) { 1716 nxge_hio_data_t *nhd = 1717 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio; 1718 1719 MUTEX_ENTER(&nhd->lock); 1720 shared = nxgep->tdc_is_shared[tdc]; 1721 MUTEX_EXIT(&nhd->lock); 1722 } else { 1723 shared = B_FALSE; 1724 } 1725 1726 /* 1727 * Now, process continue to process. 1728 */ 1729 if (((1 << tdc) & set->owned.map) && !shared) { 1730 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1731 if (ring) { 1732 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 1733 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1734 "==> nxge_txdma_hung: TDC %d hung", 1735 tdc)); 1736 return (B_TRUE); 1737 } 1738 } 1739 } 1740 } 1741 1742 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1743 1744 return (B_FALSE); 1745 } 1746 1747 /* 1748 * nxge_txdma_channel_hung 1749 * 1750 * Reset a TDC. 1751 * 1752 * Arguments: 1753 * nxgep 1754 * ring <channel>'s ring. 1755 * channel The channel to reset. 1756 * 1757 * Notes: 1758 * Called by nxge_txdma.c:nxge_txdma_hung() 1759 * 1760 * NPI/NXGE function calls: 1761 * npi_txdma_ring_head_get() 1762 * 1763 * Registers accessed: 1764 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1765 * 1766 * Context: 1767 * Any domain 1768 */ 1769 int 1770 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1771 { 1772 uint16_t head_index, tail_index; 1773 boolean_t head_wrap, tail_wrap; 1774 npi_handle_t handle; 1775 tx_ring_hdl_t tx_head; 1776 uint_t tx_rd_index; 1777 1778 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1779 1780 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1781 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1782 "==> nxge_txdma_channel_hung: channel %d", channel)); 1783 MUTEX_ENTER(&tx_ring_p->lock); 1784 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1785 1786 tail_index = tx_ring_p->wr_index; 1787 tail_wrap = tx_ring_p->wr_index_wrap; 1788 tx_rd_index = tx_ring_p->rd_index; 1789 MUTEX_EXIT(&tx_ring_p->lock); 1790 1791 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1792 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1793 "tail_index %d tail_wrap %d ", 1794 channel, tx_rd_index, tail_index, tail_wrap)); 1795 /* 1796 * Read the hardware maintained transmit head 1797 * and wrap around bit. 1798 */ 1799 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1800 head_index = tx_head.bits.ldw.head; 1801 head_wrap = tx_head.bits.ldw.wrap; 1802 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1803 "==> nxge_txdma_channel_hung: " 1804 "tx_rd_index %d tail %d tail_wrap %d " 1805 "head %d wrap %d", 1806 tx_rd_index, tail_index, tail_wrap, 1807 head_index, head_wrap)); 1808 1809 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1810 tail_index, tail_wrap) && 1811 (head_index == tx_rd_index)) { 1812 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1813 "==> nxge_txdma_channel_hung: EMPTY")); 1814 return (B_FALSE); 1815 } 1816 1817 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1818 "==> nxge_txdma_channel_hung: Checking if ring full")); 1819 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1820 tail_wrap)) { 1821 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1822 "==> nxge_txdma_channel_hung: full")); 1823 return (B_TRUE); 1824 } 1825 1826 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1827 1828 return (B_FALSE); 1829 } 1830 1831 /* 1832 * nxge_fixup_hung_txdma_rings 1833 * 1834 * Disable a TDC. 1835 * 1836 * Arguments: 1837 * nxgep 1838 * channel The channel to reset. 1839 * reg_data The current TX_CS. 
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));

    if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
        return;
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
            if (ring) {
                nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
                NXGE_DEBUG_MSG((nxgep, TX_CTL,
                    "==> nxge_fixup_hung_txdma_rings: TDC %d",
                    tdc));
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}

/*
 * nxge_txdma_fixup_hung_channel
 *
 *	'Fix' a hung TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to fix.
 *
 * Notes:
 *	Called by nxge_fixup_hung_txdma_rings()
 *
 *	1. Reclaim the TDC.
 *	2. Disable the TDC.
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_reclaim()
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
{
    p_tx_ring_t ring_p;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
    ring_p = nxge_txdma_get_ring(nxgep, channel);
    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fix_hung_channel"));
        return;
    }

    if (ring_p->tdc != channel) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fix_hung_channel: channel not matched "
            "ring tdc %d passed channel",
            ring_p->tdc, channel));
        return;
    }

    nxge_txdma_fixup_channel(nxgep, ring_p, channel);

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
    npi_handle_t handle;
    tdmc_intr_dbg_t intr_dbg;
    int status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));

    if (ring_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fixup_channel: NULL ring pointer"));
        return;
    }

    if (ring_p->tdc != channel) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_fixup_hung_channel: channel "
            "not matched "
            "ring tdc %d passed channel",
            ring_p->tdc, channel));
        return;
    }

    /* Reclaim descriptors */
    MUTEX_ENTER(&ring_p->lock);
    (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
    MUTEX_EXIT(&ring_p->lock);

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Stop the DMA channel and wait for the stop-done bit.
     * If the stop-done bit is not set, then force
     * an error.
     */
1973 */ 1974 status = npi_txdma_channel_disable(handle, channel); 1975 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1976 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1977 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1978 "ring tdc %d passed channel %d", 1979 ring_p->tdc, channel)); 1980 return; 1981 } 1982 1983 /* Inject any error */ 1984 intr_dbg.value = 0; 1985 intr_dbg.bits.ldw.nack_pref = 1; 1986 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1987 1988 /* Stop done bit will be set as a result of error injection */ 1989 status = npi_txdma_channel_disable(handle, channel); 1990 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1991 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1992 "<== nxge_txdma_fixup_hung_channel: stopped again" 1993 "ring tdc %d passed channel", 1994 ring_p->tdc, channel)); 1995 return; 1996 } 1997 1998 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1999 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 2000 "ring tdc %d passed channel", 2001 ring_p->tdc, channel)); 2002 2003 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 2004 } 2005 2006 /*ARGSUSED*/ 2007 void 2008 nxge_reclaim_rings(p_nxge_t nxgep) 2009 { 2010 nxge_grp_set_t *set = &nxgep->tx_set; 2011 int tdc; 2012 2013 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 2014 2015 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2016 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2017 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2018 return; 2019 } 2020 2021 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2022 if ((1 << tdc) & set->owned.map) { 2023 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2024 if (ring) { 2025 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2026 "==> nxge_reclaim_rings: TDC %d", tdc)); 2027 MUTEX_ENTER(&ring->lock); 2028 (void) nxge_txdma_reclaim(nxgep, ring, 0); 2029 MUTEX_EXIT(&ring->lock); 2030 } 2031 } 2032 } 2033 2034 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 2035 } 2036 2037 void 2038 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 2039 { 2040 nxge_grp_set_t *set = &nxgep->tx_set; 2041 npi_handle_t handle; 2042 int tdc; 2043 2044 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 2045 2046 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2047 2048 if (!isLDOMguest(nxgep)) { 2049 (void) npi_txdma_dump_fzc_regs(handle); 2050 2051 /* Dump TXC registers. */ 2052 (void) npi_txc_dump_fzc_regs(handle); 2053 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2054 } 2055 2056 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2057 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2058 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2059 return; 2060 } 2061 2062 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2063 if ((1 << tdc) & set->owned.map) { 2064 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2065 if (ring) { 2066 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2067 "==> nxge_txdma_regs_dump_channels: " 2068 "TDC %d", tdc)); 2069 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2070 2071 /* Dump TXC registers, if able to. 
*/ 2072 if (!isLDOMguest(nxgep)) { 2073 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2074 "==> nxge_txdma_regs_dump_channels:" 2075 " FZC TDC %d", tdc)); 2076 (void) npi_txc_dump_tdc_fzc_regs 2077 (handle, tdc); 2078 } 2079 nxge_txdma_regs_dump(nxgep, tdc); 2080 } 2081 } 2082 } 2083 2084 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 2085 } 2086 2087 void 2088 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 2089 { 2090 npi_handle_t handle; 2091 tx_ring_hdl_t hdl; 2092 tx_ring_kick_t kick; 2093 tx_cs_t cs; 2094 txc_control_t control; 2095 uint32_t bitmap = 0; 2096 uint32_t burst = 0; 2097 uint32_t bytes = 0; 2098 dma_log_page_t cfg; 2099 2100 printf("\n\tfunc # %d tdc %d ", 2101 nxgep->function_num, channel); 2102 cfg.page_num = 0; 2103 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2104 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2105 printf("\n\tlog page func %d valid page 0 %d", 2106 cfg.func_num, cfg.valid); 2107 cfg.page_num = 1; 2108 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2109 printf("\n\tlog page func %d valid page 1 %d", 2110 cfg.func_num, cfg.valid); 2111 2112 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 2113 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 2114 printf("\n\thead value is 0x%0llx", 2115 (long long)hdl.value); 2116 printf("\n\thead index %d", hdl.bits.ldw.head); 2117 printf("\n\tkick value is 0x%0llx", 2118 (long long)kick.value); 2119 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 2120 2121 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 2122 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 2123 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 2124 2125 (void) npi_txc_control(handle, OP_GET, &control); 2126 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 2127 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 2128 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 2129 2130 printf("\n\tTXC port control 0x%0llx", 2131 (long long)control.value); 2132 printf("\n\tTXC port bitmap 0x%x", bitmap); 2133 printf("\n\tTXC max burst %d", burst); 2134 printf("\n\tTXC bytes xmt %d\n", bytes); 2135 2136 { 2137 ipp_status_t status; 2138 2139 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 2140 #if defined(__i386) 2141 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value); 2142 #else 2143 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value); 2144 #endif 2145 } 2146 } 2147 2148 /* 2149 * nxge_tdc_hvio_setup 2150 * 2151 * Record the hypervisor (HV) I/O base addresses and sizes of the channel's transmit data and control buffers in the ring structure (sun4v NIU logical-page workaround). 2152 * 2153 * Arguments: 2154 * nxgep 2155 * channel The channel to map. 2156 * 2157 * Notes: 2158 * 2159 * NPI/NXGE function calls: 2160 * na 2161 * 2162 * Context: 2163 * Service domain?
2164 */ 2165 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2166 static void 2167 nxge_tdc_hvio_setup( 2168 nxge_t *nxgep, int channel) 2169 { 2170 nxge_dma_common_t *data; 2171 nxge_dma_common_t *control; 2172 tx_ring_t *ring; 2173 2174 ring = nxgep->tx_rings->rings[channel]; 2175 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2176 2177 ring->hv_set = B_FALSE; 2178 2179 ring->hv_tx_buf_base_ioaddr_pp = 2180 (uint64_t)data->orig_ioaddr_pp; 2181 ring->hv_tx_buf_ioaddr_size = 2182 (uint64_t)data->orig_alength; 2183 2184 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2185 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2186 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2187 ring->hv_tx_buf_base_ioaddr_pp, 2188 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2189 data->ioaddr_pp, data->orig_vatopa, 2190 data->orig_alength, data->orig_alength)); 2191 2192 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2193 2194 ring->hv_tx_cntl_base_ioaddr_pp = 2195 (uint64_t)control->orig_ioaddr_pp; 2196 ring->hv_tx_cntl_ioaddr_size = 2197 (uint64_t)control->orig_alength; 2198 2199 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2200 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2201 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2202 ring->hv_tx_cntl_base_ioaddr_pp, 2203 control->orig_ioaddr_pp, control->orig_vatopa, 2204 ring->hv_tx_cntl_ioaddr_size, 2205 control->orig_alength, control->orig_alength)); 2206 } 2207 #endif 2208 2209 static nxge_status_t 2210 nxge_map_txdma(p_nxge_t nxgep, int channel) 2211 { 2212 nxge_dma_common_t **pData; 2213 nxge_dma_common_t **pControl; 2214 tx_ring_t **pRing, *ring; 2215 tx_mbox_t **mailbox; 2216 uint32_t num_chunks; 2217 2218 nxge_status_t status = NXGE_OK; 2219 2220 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2221 2222 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2223 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2224 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2225 "<== nxge_map_txdma: buf not allocated")); 2226 return (NXGE_ERROR); 2227 } 2228 } 2229 2230 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2231 return (NXGE_ERROR); 2232 2233 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2234 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2235 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2236 pRing = &nxgep->tx_rings->rings[channel]; 2237 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2238 2239 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2240 "tx_rings $%p tx_desc_rings $%p", 2241 nxgep->tx_rings, nxgep->tx_rings->rings)); 2242 2243 /* 2244 * Map descriptors from the buffer pools for <channel>. 2245 */ 2246 2247 /* 2248 * Set up and prepare buffer blocks, descriptors 2249 * and mailbox. 
2250 */ 2251 status = nxge_map_txdma_channel(nxgep, channel, 2252 pData, pRing, num_chunks, pControl, mailbox); 2253 if (status != NXGE_OK) { 2254 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2255 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2256 "returned 0x%x", 2257 nxgep, channel, status)); 2258 return (status); 2259 } 2260 2261 ring = *pRing; 2262 2263 ring->index = (uint16_t)channel; 2264 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2265 2266 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2267 if (isLDOMguest(nxgep)) { 2268 (void) nxge_tdc_lp_conf(nxgep, channel); 2269 } else { 2270 nxge_tdc_hvio_setup(nxgep, channel); 2271 } 2272 #endif 2273 2274 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2275 "(status 0x%x channel %d)", status, channel)); 2276 2277 return (status); 2278 } 2279 2280 static nxge_status_t 2281 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2282 p_nxge_dma_common_t *dma_buf_p, 2283 p_tx_ring_t *tx_desc_p, 2284 uint32_t num_chunks, 2285 p_nxge_dma_common_t *dma_cntl_p, 2286 p_tx_mbox_t *tx_mbox_p) 2287 { 2288 int status = NXGE_OK; 2289 2290 /* 2291 * Set up and prepare buffer blocks, descriptors 2292 * and mailbox. 2293 */ 2294 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2295 "==> nxge_map_txdma_channel (channel %d)", channel)); 2296 /* 2297 * Transmit buffer blocks 2298 */ 2299 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2300 dma_buf_p, tx_desc_p, num_chunks); 2301 if (status != NXGE_OK) { 2302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2303 "==> nxge_map_txdma_channel (channel %d): " 2304 "map buffer failed 0x%x", channel, status)); 2305 goto nxge_map_txdma_channel_exit; 2306 } 2307 2308 /* 2309 * Transmit block ring, and mailbox. 2310 */ 2311 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2312 tx_mbox_p); 2313 2314 goto nxge_map_txdma_channel_exit; 2315 2316 nxge_map_txdma_channel_fail1: 2317 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2318 "==> nxge_map_txdma_channel: unmap buf" 2319 "(status 0x%x channel %d)", 2320 status, channel)); 2321 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2322 2323 nxge_map_txdma_channel_exit: 2324 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2325 "<== nxge_map_txdma_channel: " 2326 "(status 0x%x channel %d)", 2327 status, channel)); 2328 2329 return (status); 2330 } 2331 2332 /*ARGSUSED*/ 2333 static void 2334 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2335 { 2336 tx_ring_t *ring; 2337 tx_mbox_t *mailbox; 2338 2339 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2340 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2341 /* 2342 * unmap tx block ring, and mailbox. 2343 */ 2344 ring = nxgep->tx_rings->rings[channel]; 2345 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2346 2347 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2348 2349 /* unmap buffer blocks */ 2350 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2351 2352 nxge_free_txb(nxgep, channel); 2353 2354 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2355 } 2356 2357 /* 2358 * nxge_map_txdma_channel_cfg_ring 2359 * 2360 * Map a TDC into our kernel space. 2361 * This function allocates all of the per-channel data structures. 2362 * 2363 * Arguments: 2364 * nxgep 2365 * dma_channel The channel to map. 2366 * dma_cntl_p 2367 * tx_ring_p dma_channel's transmit ring 2368 * tx_mbox_p dma_channel's mailbox 2369 * 2370 * Notes: 2371 * 2372 * NPI/NXGE function calls: 2373 * nxge_setup_dma_common() 2374 * 2375 * Registers accessed: 2376 * none. 
2377 * 2378 * Context: 2379 * Any domain 2380 */ 2381 /*ARGSUSED*/ 2382 static void 2383 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2384 p_nxge_dma_common_t *dma_cntl_p, 2385 p_tx_ring_t tx_ring_p, 2386 p_tx_mbox_t *tx_mbox_p) 2387 { 2388 p_tx_mbox_t mboxp; 2389 p_nxge_dma_common_t cntl_dmap; 2390 p_nxge_dma_common_t dmap; 2391 p_tx_rng_cfig_t tx_ring_cfig_p; 2392 p_tx_ring_kick_t tx_ring_kick_p; 2393 p_tx_cs_t tx_cs_p; 2394 p_tx_dma_ent_msk_t tx_evmask_p; 2395 p_txdma_mbh_t mboxh_p; 2396 p_txdma_mbl_t mboxl_p; 2397 uint64_t tx_desc_len; 2398 2399 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2400 "==> nxge_map_txdma_channel_cfg_ring")); 2401 2402 cntl_dmap = *dma_cntl_p; 2403 2404 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2405 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2406 sizeof (tx_desc_t)); 2407 /* 2408 * Zero out transmit ring descriptors. 2409 */ 2410 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2411 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2412 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2413 tx_cs_p = &(tx_ring_p->tx_cs); 2414 tx_evmask_p = &(tx_ring_p->tx_evmask); 2415 tx_ring_cfig_p->value = 0; 2416 tx_ring_kick_p->value = 0; 2417 tx_cs_p->value = 0; 2418 tx_evmask_p->value = 0; 2419 2420 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2421 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2422 dma_channel, 2423 dmap->dma_cookie.dmac_laddress)); 2424 2425 tx_ring_cfig_p->value = 0; 2426 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2427 tx_ring_cfig_p->value = 2428 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2429 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2430 2431 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2432 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2433 dma_channel, 2434 tx_ring_cfig_p->value)); 2435 2436 tx_cs_p->bits.ldw.rst = 1; 2437 2438 /* Map in mailbox */ 2439 mboxp = (p_tx_mbox_t) 2440 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2441 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2442 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2443 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2444 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2445 mboxh_p->value = mboxl_p->value = 0; 2446 2447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2448 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2449 dmap->dma_cookie.dmac_laddress)); 2450 2451 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2452 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2453 2454 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2455 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2456 2457 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2458 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2459 dmap->dma_cookie.dmac_laddress)); 2460 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2461 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2462 "mbox $%p", 2463 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2464 tx_ring_p->page_valid.value = 0; 2465 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2466 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2467 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2468 tx_ring_p->page_hdl.value = 0; 2469 2470 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2471 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2472 2473 tx_ring_p->max_burst.value = 0; 2474 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2475 2476 *tx_mbox_p = mboxp; 2477 2478 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2479 "<== nxge_map_txdma_channel_cfg_ring")); 2480 } 2481 2482 
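/*
 * A sketch of the register packing done by nxge_map_txdma_channel_cfg_ring()
 * above, using the same masks and shifts (desc_dma_addr and mbox_dma_addr
 * stand for the respective DMA cookie addresses; illustrative only):
 *
 *	cfig.value = (desc_dma_addr & TX_RNG_CFIG_ADDR_MASK) |
 *	    ((tx_ring_size >> 3) << TX_RNG_CFIG_LEN_SHIFT);
 *	mbh.mbaddr = (mbox_dma_addr >> TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK;
 *	mbl.mbaddr = (mbox_dma_addr & TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT;
 *
 * i.e. the descriptor ring base and length share TX_RNG_CFIG, and the 64-bit
 * mailbox DMA address is split across the TXDMA_MBH/TXDMA_MBL register pair.
 */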
/*ARGSUSED*/ 2483 static void 2484 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2485 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2486 { 2487 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2488 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2489 tx_ring_p->tdc)); 2490 2491 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2492 2493 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2494 "<== nxge_unmap_txdma_channel_cfg_ring")); 2495 } 2496 2497 /* 2498 * nxge_map_txdma_channel_buf_ring 2499 * 2500 * 2501 * Arguments: 2502 * nxgep 2503 * channel The channel to map. 2504 * dma_buf_p 2505 * tx_desc_p channel's descriptor ring 2506 * num_chunks 2507 * 2508 * Notes: 2509 * 2510 * NPI/NXGE function calls: 2511 * nxge_setup_dma_common() 2512 * 2513 * Registers accessed: 2514 * none. 2515 * 2516 * Context: 2517 * Any domain 2518 */ 2519 static nxge_status_t 2520 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2521 p_nxge_dma_common_t *dma_buf_p, 2522 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2523 { 2524 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2525 p_nxge_dma_common_t dmap; 2526 nxge_os_dma_handle_t tx_buf_dma_handle; 2527 p_tx_ring_t tx_ring_p; 2528 p_tx_msg_t tx_msg_ring; 2529 nxge_status_t status = NXGE_OK; 2530 int ddi_status = DDI_SUCCESS; 2531 int i, j, index; 2532 uint32_t size, bsize; 2533 uint32_t nblocks, nmsgs; 2534 char qname[TASKQ_NAMELEN]; 2535 2536 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2537 "==> nxge_map_txdma_channel_buf_ring")); 2538 2539 dma_bufp = tmp_bufp = *dma_buf_p; 2540 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2541 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2542 "chunks bufp $%p", 2543 channel, num_chunks, dma_bufp)); 2544 2545 nmsgs = 0; 2546 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2547 nmsgs += tmp_bufp->nblocks; 2548 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2549 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2550 "bufp $%p nblocks %d nmsgs %d", 2551 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2552 } 2553 if (!nmsgs) { 2554 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2555 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2556 "no msg blocks", 2557 channel)); 2558 status = NXGE_ERROR; 2559 goto nxge_map_txdma_channel_buf_ring_exit; 2560 } 2561 2562 tx_ring_p = (p_tx_ring_t) 2563 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2564 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2565 (void *)nxgep->interrupt_cookie); 2566 2567 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2568 tx_ring_p->tx_ring_busy = B_FALSE; 2569 tx_ring_p->nxgep = nxgep; 2570 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL; 2571 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d", 2572 nxgep->instance, channel); 2573 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1, 2574 TASKQ_DEFAULTPRI, 0); 2575 if (tx_ring_p->taskq == NULL) { 2576 goto nxge_map_txdma_channel_buf_ring_fail1; 2577 } 2578 2579 /* 2580 * Allocate transmit message rings and handles for packets 2581 * not to be copied to premapped buffers. 
2582 */ 2583 size = nmsgs * sizeof (tx_msg_t); 2584 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2585 for (i = 0; i < nmsgs; i++) { 2586 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2587 DDI_DMA_DONTWAIT, 0, 2588 &tx_msg_ring[i].dma_handle); 2589 if (ddi_status != DDI_SUCCESS) { 2590 status |= NXGE_DDI_FAILED; 2591 break; 2592 } 2593 } 2594 if (i < nmsgs) { 2595 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2596 "Allocate handles failed.")); 2597 goto nxge_map_txdma_channel_buf_ring_fail1; 2598 } 2599 2600 tx_ring_p->tdc = channel; 2601 tx_ring_p->tx_msg_ring = tx_msg_ring; 2602 tx_ring_p->tx_ring_size = nmsgs; 2603 tx_ring_p->num_chunks = num_chunks; 2604 if (!nxge_tx_intr_thres) { 2605 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2606 } 2607 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2608 tx_ring_p->rd_index = 0; 2609 tx_ring_p->wr_index = 0; 2610 tx_ring_p->ring_head.value = 0; 2611 tx_ring_p->ring_kick_tail.value = 0; 2612 tx_ring_p->descs_pending = 0; 2613 2614 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2615 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2616 "actual tx desc max %d nmsgs %d " 2617 "(config nxge_tx_ring_size %d)", 2618 channel, tx_ring_p->tx_ring_size, nmsgs, 2619 nxge_tx_ring_size)); 2620 2621 /* 2622 * Map in buffers from the buffer pool. 2623 */ 2624 index = 0; 2625 bsize = dma_bufp->block_size; 2626 2627 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2628 "dma_bufp $%p tx_rng_p $%p " 2629 "tx_msg_rng_p $%p bsize %d", 2630 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2631 2632 tx_buf_dma_handle = dma_bufp->dma_handle; 2633 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2634 bsize = dma_bufp->block_size; 2635 nblocks = dma_bufp->nblocks; 2636 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2637 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2638 "size %d dma_bufp $%p", 2639 i, sizeof (nxge_dma_common_t), dma_bufp)); 2640 2641 for (j = 0; j < nblocks; j++) { 2642 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2643 dmap = &tx_msg_ring[index++].buf_dma; 2644 #ifdef TX_MEM_DEBUG 2645 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2646 "==> nxge_map_txdma_channel_buf_ring: j %d" 2647 "dmap $%p", i, dmap)); 2648 #endif 2649 nxge_setup_dma_common(dmap, dma_bufp, 1, 2650 bsize); 2651 } 2652 } 2653 2654 if (i < num_chunks) { 2655 status = NXGE_ERROR; 2656 goto nxge_map_txdma_channel_buf_ring_fail1; 2657 } 2658 2659 *tx_desc_p = tx_ring_p; 2660 2661 goto nxge_map_txdma_channel_buf_ring_exit; 2662 2663 nxge_map_txdma_channel_buf_ring_fail1: 2664 if (tx_ring_p->taskq) { 2665 ddi_taskq_destroy(tx_ring_p->taskq); 2666 tx_ring_p->taskq = NULL; 2667 } 2668 2669 index--; 2670 for (; index >= 0; index--) { 2671 if (tx_msg_ring[index].dma_handle != NULL) { 2672 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2673 } 2674 } 2675 MUTEX_DESTROY(&tx_ring_p->lock); 2676 KMEM_FREE(tx_msg_ring, size); 2677 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2678 2679 status = NXGE_ERROR; 2680 2681 nxge_map_txdma_channel_buf_ring_exit: 2682 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2683 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2684 2685 return (status); 2686 } 2687 2688 /*ARGSUSED*/ 2689 static void 2690 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2691 { 2692 p_tx_msg_t tx_msg_ring; 2693 p_tx_msg_t tx_msg_p; 2694 int i; 2695 2696 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2697 "==> nxge_unmap_txdma_channel_buf_ring")); 2698 if (tx_ring_p == NULL) { 2699 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2700 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 
2701 return; 2702 } 2703 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2704 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2705 tx_ring_p->tdc)); 2706 2707 tx_msg_ring = tx_ring_p->tx_msg_ring; 2708 2709 /* 2710 * Since the serialization thread, timer thread and 2711 * interrupt thread can all call the transmit reclaim, 2712 * the unmapping function needs to acquire the lock 2713 * to free those buffers which were transmitted 2714 * by the hardware already. 2715 */ 2716 MUTEX_ENTER(&tx_ring_p->lock); 2717 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2718 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2719 "channel %d", 2720 tx_ring_p->tdc)); 2721 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2722 2723 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2724 tx_msg_p = &tx_msg_ring[i]; 2725 if (tx_msg_p->tx_message != NULL) { 2726 freemsg(tx_msg_p->tx_message); 2727 tx_msg_p->tx_message = NULL; 2728 } 2729 } 2730 2731 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2732 if (tx_msg_ring[i].dma_handle != NULL) { 2733 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2734 } 2735 tx_msg_ring[i].dma_handle = NULL; 2736 } 2737 2738 MUTEX_EXIT(&tx_ring_p->lock); 2739 2740 if (tx_ring_p->taskq) { 2741 ddi_taskq_destroy(tx_ring_p->taskq); 2742 tx_ring_p->taskq = NULL; 2743 } 2744 2745 MUTEX_DESTROY(&tx_ring_p->lock); 2746 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2747 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2748 2749 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2750 "<== nxge_unmap_txdma_channel_buf_ring")); 2751 } 2752 2753 static nxge_status_t 2754 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2755 { 2756 p_tx_rings_t tx_rings; 2757 p_tx_ring_t *tx_desc_rings; 2758 p_tx_mbox_areas_t tx_mbox_areas_p; 2759 p_tx_mbox_t *tx_mbox_p; 2760 nxge_status_t status = NXGE_OK; 2761 2762 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2763 2764 tx_rings = nxgep->tx_rings; 2765 if (tx_rings == NULL) { 2766 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2767 "<== nxge_txdma_hw_start: NULL ring pointer")); 2768 return (NXGE_ERROR); 2769 } 2770 tx_desc_rings = tx_rings->rings; 2771 if (tx_desc_rings == NULL) { 2772 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2773 "<== nxge_txdma_hw_start: NULL ring pointers")); 2774 return (NXGE_ERROR); 2775 } 2776 2777 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2778 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2779 2780 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2781 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2782 2783 status = nxge_txdma_start_channel(nxgep, channel, 2784 (p_tx_ring_t)tx_desc_rings[channel], 2785 (p_tx_mbox_t)tx_mbox_p[channel]); 2786 if (status != NXGE_OK) { 2787 goto nxge_txdma_hw_start_fail1; 2788 } 2789 2790 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2791 "tx_rings $%p rings $%p", 2792 nxgep->tx_rings, nxgep->tx_rings->rings)); 2793 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2794 "tx_rings $%p tx_desc_rings $%p", 2795 nxgep->tx_rings, tx_desc_rings)); 2796 2797 goto nxge_txdma_hw_start_exit; 2798 2799 nxge_txdma_hw_start_fail1: 2800 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2801 "==> nxge_txdma_hw_start: disable " 2802 "(status 0x%x channel %d)", status, channel)); 2803 2804 nxge_txdma_hw_start_exit: 2805 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2806 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2807 2808 return (status); 2809 } 2810 2811 /* 2812 * nxge_txdma_start_channel 2813 * 2814 * Start a TDC. 2815 * 2816 * Arguments: 2817 * nxgep 2818 * channel The channel to start. 
tx_ring_p channel's transmit descriptor ring. 2820 * tx_mbox_p channel's mailbox. 2821 * 2822 * Notes: 2823 * 2824 * NPI/NXGE function calls: 2825 * nxge_reset_txdma_channel() 2826 * nxge_init_txdma_channel_event_mask() 2827 * nxge_enable_txdma_channel() 2828 * 2829 * Registers accessed: 2830 * none directly (see functions above). 2831 * 2832 * Context: 2833 * Any domain 2834 */ 2835 static nxge_status_t 2836 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2837 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2838 2839 { 2840 nxge_status_t status = NXGE_OK; 2841 2842 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2843 "==> nxge_txdma_start_channel (channel %d)", channel)); 2844 /* 2845 * TXDMA/TXC must be in stopped state. 2846 */ 2847 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2848 2849 /* 2850 * Reset TXDMA channel 2851 */ 2852 tx_ring_p->tx_cs.value = 0; 2853 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2854 status = nxge_reset_txdma_channel(nxgep, channel, 2855 tx_ring_p->tx_cs.value); 2856 if (status != NXGE_OK) { 2857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2858 "==> nxge_txdma_start_channel (channel %d)" 2859 " reset channel failed 0x%x", channel, status)); 2860 goto nxge_txdma_start_channel_exit; 2861 } 2862 2863 /* 2864 * Initialize the TXDMA channel specific FZC control 2865 * configurations. These FZC registers are pertaining 2866 * to each TX channel (i.e. logical pages). 2867 */ 2868 if (!isLDOMguest(nxgep)) { 2869 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2870 tx_ring_p, tx_mbox_p); 2871 if (status != NXGE_OK) { 2872 goto nxge_txdma_start_channel_exit; 2873 } 2874 } 2875 2876 /* 2877 * Initialize the event masks. 2878 */ 2879 tx_ring_p->tx_evmask.value = 0; 2880 status = nxge_init_txdma_channel_event_mask(nxgep, 2881 channel, &tx_ring_p->tx_evmask); 2882 if (status != NXGE_OK) { 2883 goto nxge_txdma_start_channel_exit; 2884 } 2885 2886 /* 2887 * Load TXDMA descriptors, buffers, mailbox, 2888 * initialise the DMA channels and 2889 * enable each DMA channel. 2890 */ 2891 status = nxge_enable_txdma_channel(nxgep, channel, 2892 tx_ring_p, tx_mbox_p); 2893 if (status != NXGE_OK) { 2894 goto nxge_txdma_start_channel_exit; 2895 } 2896 2897 nxge_txdma_start_channel_exit: 2898 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2899 2900 return (status); 2901 } 2902 2903 /* 2904 * nxge_txdma_stop_channel 2905 * 2906 * Stop a TDC. 2907 * 2908 * Arguments: 2909 * nxgep 2910 * channel The channel to stop. 2911 * tx_ring_p channel's transmit descriptor ring. 2912 * tx_mbox_p channel's mailbox. 2913 * 2914 * Notes: 2915 * 2916 * NPI/NXGE function calls: 2917 * nxge_txdma_stop_inj_err() 2918 * nxge_reset_txdma_channel() 2919 * nxge_init_txdma_channel_event_mask() 2920 * nxge_init_txdma_channel_cntl_stat() 2921 * nxge_disable_txdma_channel() 2922 * 2923 * Registers accessed: 2924 * none directly (see functions above). 2925 * 2926 * Context: 2927 * Any domain 2928 */ 2929 /*ARGSUSED*/ 2930 static nxge_status_t 2931 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2932 { 2933 p_tx_ring_t tx_ring_p; 2934 int status = NXGE_OK; 2935 2936 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2937 "==> nxge_txdma_stop_channel: channel %d", channel)); 2938 2939 /* 2940 * Stop (disable) TXDMA and TXC. Note that if the stop bit is 2941 * set but the STOP_N_GO bit is not, the TXDMA reset state will 2942 * not be set when the channel is reset.
2943 */ 2944 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2945 2946 tx_ring_p = nxgep->tx_rings->rings[channel]; 2947 2948 /* 2949 * Reset TXDMA channel 2950 */ 2951 tx_ring_p->tx_cs.value = 0; 2952 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2953 status = nxge_reset_txdma_channel(nxgep, channel, 2954 tx_ring_p->tx_cs.value); 2955 if (status != NXGE_OK) { 2956 goto nxge_txdma_stop_channel_exit; 2957 } 2958 2959 #ifdef HARDWARE_REQUIRED 2960 /* Set up the interrupt event masks. */ 2961 tx_ring_p->tx_evmask.value = 0; 2962 status = nxge_init_txdma_channel_event_mask(nxgep, 2963 channel, &tx_ring_p->tx_evmask); 2964 if (status != NXGE_OK) { 2965 goto nxge_txdma_stop_channel_exit; 2966 } 2967 2968 /* Initialize the DMA control and status register */ 2969 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2970 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2971 tx_ring_p->tx_cs.value); 2972 if (status != NXGE_OK) { 2973 goto nxge_txdma_stop_channel_exit; 2974 } 2975 2976 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2977 2978 /* Disable channel */ 2979 status = nxge_disable_txdma_channel(nxgep, channel, 2980 tx_ring_p, tx_mbox_p); 2981 if (status != NXGE_OK) { 2982 goto nxge_txdma_start_channel_exit; 2983 } 2984 2985 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2986 "==> nxge_txdma_stop_channel: event done")); 2987 2988 #endif 2989 2990 nxge_txdma_stop_channel_exit: 2991 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2992 return (status); 2993 } 2994 2995 /* 2996 * nxge_txdma_get_ring 2997 * 2998 * Get the ring for a TDC. 2999 * 3000 * Arguments: 3001 * nxgep 3002 * channel 3003 * 3004 * Notes: 3005 * 3006 * NPI/NXGE function calls: 3007 * 3008 * Registers accessed: 3009 * 3010 * Context: 3011 * Any domain 3012 */ 3013 static p_tx_ring_t 3014 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 3015 { 3016 nxge_grp_set_t *set = &nxgep->tx_set; 3017 int tdc; 3018 3019 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 3020 3021 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3022 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3023 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 3024 goto return_null; 3025 } 3026 3027 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3028 if ((1 << tdc) & set->owned.map) { 3029 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3030 if (ring) { 3031 if (channel == ring->tdc) { 3032 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3033 "<== nxge_txdma_get_ring: " 3034 "tdc %d ring $%p", tdc, ring)); 3035 return (ring); 3036 } 3037 } 3038 } 3039 } 3040 3041 return_null: 3042 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3043 "ring not found")); 3044 3045 return (NULL); 3046 } 3047 3048 /* 3049 * nxge_txdma_get_mbox 3050 * 3051 * Get the mailbox for a TDC. 
3052 * 3053 * Arguments: 3054 * nxgep 3055 * channel 3056 * 3057 * Notes: 3058 * 3059 * NPI/NXGE function calls: 3060 * 3061 * Registers accessed: 3062 * 3063 * Context: 3064 * Any domain 3065 */ 3066 static p_tx_mbox_t 3067 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3068 { 3069 nxge_grp_set_t *set = &nxgep->tx_set; 3070 int tdc; 3071 3072 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3073 3074 if (nxgep->tx_mbox_areas_p == 0 || 3075 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3076 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3077 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3078 goto return_null; 3079 } 3080 3081 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3082 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3083 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3084 goto return_null; 3085 } 3086 3087 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3088 if ((1 << tdc) & set->owned.map) { 3089 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3090 if (ring) { 3091 if (channel == ring->tdc) { 3092 tx_mbox_t *mailbox = nxgep-> 3093 tx_mbox_areas_p-> 3094 txmbox_areas_p[tdc]; 3095 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3096 "<== nxge_txdma_get_mbox: tdc %d " 3097 "ring $%p", tdc, mailbox)); 3098 return (mailbox); 3099 } 3100 } 3101 } 3102 } 3103 3104 return_null: 3105 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3106 "mailbox not found")); 3107 3108 return (NULL); 3109 } 3110 3111 /* 3112 * nxge_tx_err_evnts 3113 * 3114 * Recover a TDC. 3115 * 3116 * Arguments: 3117 * nxgep 3118 * index The index to the TDC ring. 3119 * ldvp Used to get the channel number ONLY. 3120 * cs A copy of the bits from TX_CS. 3121 * 3122 * Notes: 3123 * Calling tree: 3124 * nxge_tx_intr() 3125 * 3126 * NPI/NXGE function calls: 3127 * npi_txdma_ring_error_get() 3128 * npi_txdma_inj_par_error_get() 3129 * nxge_txdma_fatal_err_recover() 3130 * 3131 * Registers accessed: 3132 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3133 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3134 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3135 * 3136 * Context: 3137 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
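 * Each fatal TX_CS condition handled below (mbox_err, pkt_size_err, tx_ring_oflow, pref_buf_par_err, nack_pref, nack_pkt_rd, conf_part_err, pkt_prt_err) bumps its tdc_stats counter, posts an FMA ereport, and finally triggers nxge_txdma_fatal_err_recover() for the channel.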
3138 */ 3139 /*ARGSUSED*/ 3140 static nxge_status_t 3141 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3142 { 3143 npi_handle_t handle; 3144 npi_status_t rs; 3145 uint8_t channel; 3146 p_tx_ring_t *tx_rings; 3147 p_tx_ring_t tx_ring_p; 3148 p_nxge_tx_ring_stats_t tdc_stats; 3149 boolean_t txchan_fatal = B_FALSE; 3150 nxge_status_t status = NXGE_OK; 3151 tdmc_inj_par_err_t par_err; 3152 uint32_t value; 3153 3154 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3155 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3156 channel = ldvp->channel; 3157 3158 tx_rings = nxgep->tx_rings->rings; 3159 tx_ring_p = tx_rings[index]; 3160 tdc_stats = tx_ring_p->tdc_stats; 3161 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3162 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3163 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3164 if ((rs = npi_txdma_ring_error_get(handle, channel, 3165 &tdc_stats->errlog)) != NPI_SUCCESS) 3166 return (NXGE_ERROR | rs); 3167 } 3168 3169 if (cs.bits.ldw.mbox_err) { 3170 tdc_stats->mbox_err++; 3171 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3172 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3173 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3174 "==> nxge_tx_err_evnts(channel %d): " 3175 "fatal error: mailbox", channel)); 3176 txchan_fatal = B_TRUE; 3177 } 3178 if (cs.bits.ldw.pkt_size_err) { 3179 tdc_stats->pkt_size_err++; 3180 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3181 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3182 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3183 "==> nxge_tx_err_evnts(channel %d): " 3184 "fatal error: pkt_size_err", channel)); 3185 txchan_fatal = B_TRUE; 3186 } 3187 if (cs.bits.ldw.tx_ring_oflow) { 3188 tdc_stats->tx_ring_oflow++; 3189 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3190 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3191 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3192 "==> nxge_tx_err_evnts(channel %d): " 3193 "fatal error: tx_ring_oflow", channel)); 3194 txchan_fatal = B_TRUE; 3195 } 3196 if (cs.bits.ldw.pref_buf_par_err) { 3197 tdc_stats->pre_buf_par_err++; 3198 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3199 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3200 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3201 "==> nxge_tx_err_evnts(channel %d): " 3202 "fatal error: pre_buf_par_err", channel)); 3203 /* Clear error injection source for parity error */ 3204 (void) npi_txdma_inj_par_error_get(handle, &value); 3205 par_err.value = value; 3206 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3207 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3208 txchan_fatal = B_TRUE; 3209 } 3210 if (cs.bits.ldw.nack_pref) { 3211 tdc_stats->nack_pref++; 3212 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3213 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3214 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3215 "==> nxge_tx_err_evnts(channel %d): " 3216 "fatal error: nack_pref", channel)); 3217 txchan_fatal = B_TRUE; 3218 } 3219 if (cs.bits.ldw.nack_pkt_rd) { 3220 tdc_stats->nack_pkt_rd++; 3221 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3222 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3223 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3224 "==> nxge_tx_err_evnts(channel %d): " 3225 "fatal error: nack_pkt_rd", channel)); 3226 txchan_fatal = B_TRUE; 3227 } 3228 if (cs.bits.ldw.conf_part_err) { 3229 tdc_stats->conf_part_err++; 3230 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3231 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3233 "==> 
nxge_tx_err_evnts(channel %d): " 3234 "fatal error: config_partition_err", channel)); 3235 txchan_fatal = B_TRUE; 3236 } 3237 if (cs.bits.ldw.pkt_prt_err) { 3238 tdc_stats->pkt_part_err++; 3239 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3240 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3242 "==> nxge_tx_err_evnts(channel %d): " 3243 "fatal error: pkt_prt_err", channel)); 3244 txchan_fatal = B_TRUE; 3245 } 3246 3247 /* Clear error injection source in case this is an injected error */ 3248 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3249 3250 if (txchan_fatal) { 3251 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3252 " nxge_tx_err_evnts: " 3253 " fatal error on channel %d cs 0x%llx\n", 3254 channel, cs.value)); 3255 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3256 tx_ring_p); 3257 if (status == NXGE_OK) { 3258 FM_SERVICE_RESTORED(nxgep); 3259 } 3260 } 3261 3262 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3263 3264 return (status); 3265 } 3266 3267 static nxge_status_t 3268 nxge_txdma_fatal_err_recover( 3269 p_nxge_t nxgep, 3270 uint16_t channel, 3271 p_tx_ring_t tx_ring_p) 3272 { 3273 npi_handle_t handle; 3274 npi_status_t rs = NPI_SUCCESS; 3275 p_tx_mbox_t tx_mbox_p; 3276 nxge_status_t status = NXGE_OK; 3277 3278 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3280 "Recovering from TxDMAChannel#%d error...", channel)); 3281 3282 /* 3283 * Stop the dma channel waits for the stop done. 3284 * If the stop done bit is not set, then create 3285 * an error. 3286 */ 3287 3288 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3289 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3290 MUTEX_ENTER(&tx_ring_p->lock); 3291 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3292 if (rs != NPI_SUCCESS) { 3293 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3294 "==> nxge_txdma_fatal_err_recover (channel %d): " 3295 "stop failed ", channel)); 3296 goto fail; 3297 } 3298 3299 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3300 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3301 3302 /* 3303 * Reset TXDMA channel 3304 */ 3305 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3306 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3307 NPI_SUCCESS) { 3308 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3309 "==> nxge_txdma_fatal_err_recover (channel %d)" 3310 " reset channel failed 0x%x", channel, rs)); 3311 goto fail; 3312 } 3313 3314 /* 3315 * Reset the tail (kick) register to 0. 3316 * (Hardware will not reset it. Tx overflow fatal 3317 * error if tail is not set to 0 after reset! 3318 */ 3319 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3320 3321 /* Restart TXDMA channel */ 3322 3323 if (!isLDOMguest(nxgep)) { 3324 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3325 3326 // XXX This is a problem in HIO! 3327 /* 3328 * Initialize the TXDMA channel specific FZC control 3329 * configurations. These FZC registers are pertaining 3330 * to each TX channel (i.e. logical pages). 3331 */ 3332 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3333 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3334 tx_ring_p, tx_mbox_p); 3335 if (status != NXGE_OK) 3336 goto fail; 3337 } 3338 3339 /* 3340 * Initialize the event masks. 
3341 */ 3342 tx_ring_p->tx_evmask.value = 0; 3343 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3344 &tx_ring_p->tx_evmask); 3345 if (status != NXGE_OK) 3346 goto fail; 3347 3348 tx_ring_p->wr_index_wrap = B_FALSE; 3349 tx_ring_p->wr_index = 0; 3350 tx_ring_p->rd_index = 0; 3351 3352 /* 3353 * Load TXDMA descriptors, buffers, mailbox, 3354 * initialise the DMA channels and 3355 * enable each DMA channel. 3356 */ 3357 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3358 status = nxge_enable_txdma_channel(nxgep, channel, 3359 tx_ring_p, tx_mbox_p); 3360 MUTEX_EXIT(&tx_ring_p->lock); 3361 if (status != NXGE_OK) 3362 goto fail; 3363 3364 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3365 "Recovery Successful, TxDMAChannel#%d Restored", 3366 channel)); 3367 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3368 3369 return (NXGE_OK); 3370 3371 fail: 3372 MUTEX_EXIT(&tx_ring_p->lock); 3373 3374 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3375 "nxge_txdma_fatal_err_recover (channel %d): " 3376 "failed to recover this txdma channel", channel)); 3377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3378 3379 return (status); 3380 } 3381 3382 /* 3383 * nxge_tx_port_fatal_err_recover 3384 * 3385 * Attempt to recover from a fatal port error. 3386 * 3387 * Arguments: 3388 * nxgep 3389 * 3390 * Notes: 3391 * How would a guest do this? 3392 * 3393 * NPI/NXGE function calls: 3394 * 3395 * Registers accessed: 3396 * 3397 * Context: 3398 * Service domain 3399 */ 3400 nxge_status_t 3401 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3402 { 3403 nxge_grp_set_t *set = &nxgep->tx_set; 3404 nxge_channel_t tdc; 3405 3406 tx_ring_t *ring; 3407 tx_mbox_t *mailbox; 3408 3409 npi_handle_t handle; 3410 nxge_status_t status; 3411 npi_status_t rs; 3412 3413 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3414 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3415 "Recovering from TxPort error...")); 3416 3417 if (isLDOMguest(nxgep)) { 3418 return (NXGE_OK); 3419 } 3420 3421 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3422 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3423 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3424 return (NXGE_ERROR); 3425 } 3426 3427 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3428 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3429 "<== nxge_tx_port_fatal_err_recover: " 3430 "NULL ring pointer(s)")); 3431 return (NXGE_ERROR); 3432 } 3433 3434 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3435 if ((1 << tdc) & set->owned.map) { 3436 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3437 if (ring) 3438 MUTEX_ENTER(&ring->lock); 3439 } 3440 } 3441 3442 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3443 3444 /* 3445 * Stop all the TDCs owned by us. 3446 * (The shared TDCs will have been stopped by their owners.) 3447 */ 3448 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3449 if ((1 << tdc) & set->owned.map) { 3450 ring = nxgep->tx_rings->rings[tdc]; 3451 if (ring) { 3452 rs = npi_txdma_channel_control 3453 (handle, TXDMA_STOP, tdc); 3454 if (rs != NPI_SUCCESS) { 3455 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3456 "nxge_tx_port_fatal_err_recover " 3457 "(channel %d): stop failed ", tdc)); 3458 goto fail; 3459 } 3460 } 3461 } 3462 } 3463 3464 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3465 3466 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3467 if ((1 << tdc) & set->owned.map) { 3468 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3469 if (ring) { 3470 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3471 } 3472 } 3473 } 3474 3475 /* 3476 * Reset all the TDCs. 
3477 */ 3478 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3479 3480 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3481 if ((1 << tdc) & set->owned.map) { 3482 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3483 if (ring) { 3484 if ((rs = npi_txdma_channel_control 3485 (handle, TXDMA_RESET, tdc)) 3486 != NPI_SUCCESS) { 3487 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3488 "nxge_tx_port_fatal_err_recover " 3489 "(channel %d) reset channel " 3490 "failed 0x%x", tdc, rs)); 3491 goto fail; 3492 } 3493 } 3494 /* 3495 * Reset the tail (kick) register to 0. 3496 * (Hardware will not reset it. Tx overflow fatal 3497 * error if tail is not set to 0 after reset! 3498 */ 3499 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3500 } 3501 } 3502 3503 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3504 3505 /* Restart all the TDCs */ 3506 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3507 if ((1 << tdc) & set->owned.map) { 3508 ring = nxgep->tx_rings->rings[tdc]; 3509 if (ring) { 3510 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3511 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3512 ring, mailbox); 3513 ring->tx_evmask.value = 0; 3514 /* 3515 * Initialize the event masks. 3516 */ 3517 status = nxge_init_txdma_channel_event_mask 3518 (nxgep, tdc, &ring->tx_evmask); 3519 3520 ring->wr_index_wrap = B_FALSE; 3521 ring->wr_index = 0; 3522 ring->rd_index = 0; 3523 3524 if (status != NXGE_OK) 3525 goto fail; 3526 if (status != NXGE_OK) 3527 goto fail; 3528 } 3529 } 3530 } 3531 3532 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3533 3534 /* Re-enable all the TDCs */ 3535 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3536 if ((1 << tdc) & set->owned.map) { 3537 ring = nxgep->tx_rings->rings[tdc]; 3538 if (ring) { 3539 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3540 status = nxge_enable_txdma_channel(nxgep, tdc, 3541 ring, mailbox); 3542 if (status != NXGE_OK) 3543 goto fail; 3544 } 3545 } 3546 } 3547 3548 /* 3549 * Unlock all the TDCs. 3550 */ 3551 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3552 if ((1 << tdc) & set->owned.map) { 3553 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3554 if (ring) 3555 MUTEX_EXIT(&ring->lock); 3556 } 3557 } 3558 3559 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3560 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3561 3562 return (NXGE_OK); 3563 3564 fail: 3565 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3566 if ((1 << tdc) & set->owned.map) { 3567 ring = nxgep->tx_rings->rings[tdc]; 3568 if (ring) 3569 MUTEX_EXIT(&ring->lock); 3570 } 3571 } 3572 3573 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3574 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3575 3576 return (status); 3577 } 3578 3579 /* 3580 * nxge_txdma_inject_err 3581 * 3582 * Inject an error into a TDC. 3583 * 3584 * Arguments: 3585 * nxgep 3586 * err_id The error to inject. 3587 * chan The channel to inject into. 3588 * 3589 * Notes: 3590 * This is called from nxge_main.c:nxge_err_inject() 3591 * Has this ioctl ever been used? 
3592 * 3593 * NPI/NXGE function calls: 3594 * npi_txdma_inj_par_error_get() 3595 * npi_txdma_inj_par_error_set() 3596 * 3597 * Registers accessed: 3598 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3599 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3600 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3601 * 3602 * Context: 3603 * Service domain 3604 */ 3605 void 3606 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3607 { 3608 tdmc_intr_dbg_t tdi; 3609 tdmc_inj_par_err_t par_err; 3610 uint32_t value; 3611 npi_handle_t handle; 3612 3613 switch (err_id) { 3614 3615 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3616 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3617 /* Clear error injection source for parity error */ 3618 (void) npi_txdma_inj_par_error_get(handle, &value); 3619 par_err.value = value; 3620 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3621 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3622 3623 par_err.bits.ldw.inject_parity_error = (1 << chan); 3624 (void) npi_txdma_inj_par_error_get(handle, &value); 3625 par_err.value = value; 3626 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3627 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3628 (unsigned long long)par_err.value); 3629 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3630 break; 3631 3632 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3633 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3634 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3635 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3636 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3637 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3638 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3639 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3640 chan, &tdi.value); 3641 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3642 tdi.bits.ldw.pref_buf_par_err = 1; 3643 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3644 tdi.bits.ldw.mbox_err = 1; 3645 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3646 tdi.bits.ldw.nack_pref = 1; 3647 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3648 tdi.bits.ldw.nack_pkt_rd = 1; 3649 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3650 tdi.bits.ldw.pkt_size_err = 1; 3651 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3652 tdi.bits.ldw.tx_ring_oflow = 1; 3653 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3654 tdi.bits.ldw.conf_part_err = 1; 3655 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3656 tdi.bits.ldw.pkt_part_err = 1; 3657 #if defined(__i386) 3658 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3659 tdi.value); 3660 #else 3661 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3662 tdi.value); 3663 #endif 3664 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3665 chan, tdi.value); 3666 3667 break; 3668 } 3669 } 3670
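/*
 * Illustrative use of the error-injection path above (hypothetical call site;
 * the driver itself reaches this function from nxge_main.c:nxge_err_inject()):
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_NACK_PREF, 0);
 *
 * sets the nack_pref bit in TDMC_INTR_DBG for channel 0; the channel then
 * reports the condition as a fatal error, and nxge_tx_err_evnts() runs
 * nxge_txdma_fatal_err_recover() for that channel.
 */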