1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_txdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 #include <npi_tx_rd64.h> 30 #include <npi_tx_wr64.h> 31 #include <sys/llc1.h> 32 33 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 34 uint32_t nxge_tx_minfree = 64; 35 uint32_t nxge_tx_intr_thres = 0; 36 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 37 uint32_t nxge_tx_tiny_pack = 1; 38 uint32_t nxge_tx_use_bcopy = 1; 39 40 extern uint32_t nxge_tx_ring_size; 41 extern uint32_t nxge_bcopy_thresh; 42 extern uint32_t nxge_dvma_thresh; 43 extern uint32_t nxge_dma_stream_thresh; 44 extern dma_method_t nxge_force_dma; 45 extern uint32_t nxge_cksum_offload; 46 47 /* Device register access attributes for PIO. */ 48 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 49 /* Device descriptor access attributes for DMA. */ 50 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 51 /* Device buffer access attributes for DMA. 
*/ 52 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 53 extern ddi_dma_attr_t nxge_desc_dma_attr; 54 extern ddi_dma_attr_t nxge_tx_dma_attr; 55 56 extern void nxge_tx_ring_task(void *arg); 57 58 static nxge_status_t nxge_map_txdma(p_nxge_t, int); 59 60 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 61 62 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 63 p_nxge_dma_common_t *, p_tx_ring_t *, 64 uint32_t, p_nxge_dma_common_t *, 65 p_tx_mbox_t *); 66 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 67 68 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 69 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 70 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 71 72 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 73 p_nxge_dma_common_t *, p_tx_ring_t, 74 p_tx_mbox_t *); 75 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 76 p_tx_ring_t, p_tx_mbox_t); 77 78 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 79 p_tx_ring_t, p_tx_mbox_t); 80 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 81 82 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 83 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 84 p_nxge_ldv_t, tx_cs_t); 85 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 86 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 87 uint16_t, p_tx_ring_t); 88 89 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 90 p_tx_ring_t ring_p, uint16_t channel); 91 92 nxge_status_t 93 nxge_init_txdma_channels(p_nxge_t nxgep) 94 { 95 nxge_grp_set_t *set = &nxgep->tx_set; 96 int i, tdc, count; 97 nxge_grp_t *group; 98 dc_map_t map; 99 int dev_gindex; 100 101 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 102 103 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 104 if ((1 << i) & set->lg.map) { 105 group = set->group[i]; 106 dev_gindex = 107 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i; 108 map = nxgep->pt_config.tdc_grps[dev_gindex].map; 109 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 110 if ((1 << tdc) & map) { 111 if ((nxge_grp_dc_add(nxgep, 112 group, VP_BOUND_TX, tdc))) 113 goto init_txdma_channels_exit; 114 } 115 } 116 } 117 if (++count == set->lg.count) 118 break; 119 } 120 121 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 122 return (NXGE_OK); 123 124 init_txdma_channels_exit: 125 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 126 if ((1 << i) & set->lg.map) { 127 group = set->group[i]; 128 dev_gindex = 129 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i; 130 map = nxgep->pt_config.tdc_grps[dev_gindex].map; 131 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 132 if ((1 << tdc) & map) { 133 nxge_grp_dc_remove(nxgep, 134 VP_BOUND_TX, tdc); 135 } 136 } 137 } 138 if (++count == set->lg.count) 139 break; 140 } 141 142 return (NXGE_ERROR); 143 144 } 145 146 nxge_status_t 147 nxge_init_txdma_channel( 148 p_nxge_t nxge, 149 int channel) 150 { 151 nxge_status_t status; 152 153 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 154 155 status = nxge_map_txdma(nxge, channel); 156 if (status != NXGE_OK) { 157 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 158 "<== nxge_init_txdma_channel: status 0x%x", status)); 159 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 160 return (status); 161 } 162 163 status = nxge_txdma_hw_start(nxge, channel); 164 if (status != NXGE_OK) { 165 (void) nxge_unmap_txdma_channel(nxge, channel); 166 (void) 
npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 167 return (status); 168 } 169 170 if (!nxge->statsp->tdc_ksp[channel]) 171 nxge_setup_tdc_kstats(nxge, channel); 172 173 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 174 175 return (status); 176 } 177 178 void 179 nxge_uninit_txdma_channels(p_nxge_t nxgep) 180 { 181 nxge_grp_set_t *set = &nxgep->tx_set; 182 int tdc; 183 184 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 185 186 if (set->owned.map == 0) { 187 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 188 "nxge_uninit_txdma_channels: no channels")); 189 return; 190 } 191 192 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 193 if ((1 << tdc) & set->owned.map) { 194 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 195 } 196 } 197 198 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 199 } 200 201 void 202 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 203 { 204 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 205 206 if (nxgep->statsp->tdc_ksp[channel]) { 207 kstat_delete(nxgep->statsp->tdc_ksp[channel]); 208 nxgep->statsp->tdc_ksp[channel] = 0; 209 } 210 211 (void) nxge_txdma_stop_channel(nxgep, channel); 212 nxge_unmap_txdma_channel(nxgep, channel); 213 214 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 215 "<== nxge_uninit_txdma_channel")); 216 } 217 218 void 219 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 220 uint32_t entries, uint32_t size) 221 { 222 size_t tsize; 223 *dest_p = *src_p; 224 tsize = size * entries; 225 dest_p->alength = tsize; 226 dest_p->nblocks = entries; 227 dest_p->block_size = size; 228 dest_p->offset += tsize; 229 230 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 231 src_p->alength -= tsize; 232 src_p->dma_cookie.dmac_laddress += tsize; 233 src_p->dma_cookie.dmac_size -= tsize; 234 } 235 236 /* 237 * nxge_reset_txdma_channel 238 * 239 * Reset a TDC. 240 * 241 * Arguments: 242 * nxgep 243 * channel The channel to reset. 244 * reg_data The current TX_CS. 245 * 246 * Notes: 247 * 248 * NPI/NXGE function calls: 249 * npi_txdma_channel_reset() 250 * npi_txdma_channel_control() 251 * 252 * Registers accessed: 253 * TX_CS DMC+0x40028 Transmit Control And Status 254 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 255 * 256 * Context: 257 * Any domain 258 */ 259 nxge_status_t 260 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 261 { 262 npi_status_t rs = NPI_SUCCESS; 263 nxge_status_t status = NXGE_OK; 264 npi_handle_t handle; 265 266 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 267 268 handle = NXGE_DEV_NPI_HANDLE(nxgep); 269 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 270 rs = npi_txdma_channel_reset(handle, channel); 271 } else { 272 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 273 channel); 274 } 275 276 if (rs != NPI_SUCCESS) { 277 status = NXGE_ERROR | rs; 278 } 279 280 /* 281 * Reset the tail (kick) register to 0. 282 * (Hardware will not reset it. Tx overflow fatal 283 * error if tail is not set to 0 after reset! 284 */ 285 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 286 287 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 288 return (status); 289 } 290 291 /* 292 * nxge_init_txdma_channel_event_mask 293 * 294 * Enable interrupts for a set of events. 295 * 296 * Arguments: 297 * nxgep 298 * channel The channel to map. 299 * mask_p The events to enable. 
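 *
 *	A minimal usage sketch (illustrative only; it assumes the mask
 *	overlay has a "value" field like the other TX register overlays
 *	in this file, and that a zeroed mask leaves every event enabled):
 *
 *		tx_dma_ent_msk_t mask;
 *
 *		mask.value = 0;
 *		(void) nxge_init_txdma_channel_event_mask(nxgep,
 *		    channel, &mask);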
300 *
301 * Notes:
302 *
303 * NPI/NXGE function calls:
304 * npi_txdma_event_mask()
305 *
306 * Registers accessed:
307 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
308 *
309 * Context:
310 * Any domain
311 */
312 nxge_status_t
313 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
314 p_tx_dma_ent_msk_t mask_p)
315 {
316 npi_handle_t handle;
317 npi_status_t rs = NPI_SUCCESS;
318 nxge_status_t status = NXGE_OK;
319
320 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
321 "==> nxge_init_txdma_channel_event_mask"));
322
323 handle = NXGE_DEV_NPI_HANDLE(nxgep);
324 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
325 if (rs != NPI_SUCCESS) {
326 status = NXGE_ERROR | rs;
327 }
328
329 return (status);
330 }
331
332 /*
333 * nxge_init_txdma_channel_cntl_stat
334 *
335 * Write reg_data to a TDC's control and status (TX_CS) register.
336 *
337 * Arguments:
338 * nxgep
339 * channel The channel to initialize.
340 *
341 * Notes:
342 *
343 * NPI/NXGE function calls:
344 * npi_txdma_control_status()
345 *
346 * Registers accessed:
347 * TX_CS DMC+0x40028 Transmit Control And Status
348 *
349 * Context:
350 * Any domain
351 */
352 nxge_status_t
353 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
354 uint64_t reg_data)
355 {
356 npi_handle_t handle;
357 npi_status_t rs = NPI_SUCCESS;
358 nxge_status_t status = NXGE_OK;
359
360 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
361 "==> nxge_init_txdma_channel_cntl_stat"));
362
363 handle = NXGE_DEV_NPI_HANDLE(nxgep);
364 rs = npi_txdma_control_status(handle, OP_SET, channel,
365 (p_tx_cs_t)&reg_data);
366
367 if (rs != NPI_SUCCESS) {
368 status = NXGE_ERROR | rs;
369 }
370
371 return (status);
372 }
373
374 /*
375 * nxge_enable_txdma_channel
376 *
377 * Enable a TDC.
378 *
379 * Arguments:
380 * nxgep
381 * channel The channel to enable.
382 * tx_desc_p The channel's transmit descriptor ring.
383 * mbox_p The channel's mailbox.
384 *
385 * Notes:
386 *
387 * NPI/NXGE function calls:
388 * npi_txdma_ring_config()
389 * npi_txdma_mbox_config()
390 * npi_txdma_channel_init_enable()
391 *
392 * Registers accessed:
393 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
394 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
395 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
396 * TX_CS DMC+0x40028 Transmit Control And Status
397 *
398 * Context:
399 * Any domain
400 */
401 nxge_status_t
402 nxge_enable_txdma_channel(p_nxge_t nxgep,
403 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
404 {
405 npi_handle_t handle;
406 npi_status_t rs = NPI_SUCCESS;
407 nxge_status_t status = NXGE_OK;
408
409 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
410
411 handle = NXGE_DEV_NPI_HANDLE(nxgep);
412 /*
413 * Use configuration data composed at init time.
414 * Write to hardware the transmit ring configurations.
415 */
416 rs = npi_txdma_ring_config(handle, OP_SET, channel,
417 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
418
419 if (rs != NPI_SUCCESS) {
420 return (NXGE_ERROR | rs);
421 }
422
423 if (isLDOMguest(nxgep)) {
424 /* Add interrupt handler for this channel. */
425 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
426 return (NXGE_ERROR);
427 }
428
429 /* Write to hardware the mailbox */
430 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
431 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
432
433 if (rs != NPI_SUCCESS) {
434 return (NXGE_ERROR | rs);
435 }
436
437 /* Start the DMA engine.
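 * This is the last step of the enable sequence above: ring
 * configuration (TX_RNG_CFIG), then mailbox (TXDMA_MBH/MBL), then the
 * channel itself via TX_CS.  Enabling only arms the channel;
 * descriptors queued afterwards are handed to the hardware by writing
 * the new tail index to TX_RING_KICK.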
*/ 438 rs = npi_txdma_channel_init_enable(handle, channel); 439 440 if (rs != NPI_SUCCESS) { 441 return (NXGE_ERROR | rs); 442 } 443 444 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 445 446 return (status); 447 } 448 449 void 450 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 451 boolean_t l4_cksum, int pkt_len, uint8_t npads, 452 p_tx_pkt_hdr_all_t pkthdrp, 453 t_uscalar_t start_offset, 454 t_uscalar_t stuff_offset) 455 { 456 p_tx_pkt_header_t hdrp; 457 p_mblk_t nmp; 458 uint64_t tmp; 459 size_t mblk_len; 460 size_t iph_len; 461 size_t hdrs_size; 462 uint8_t hdrs_buf[sizeof (struct ether_header) + 463 64 + sizeof (uint32_t)]; 464 uint8_t *cursor; 465 uint8_t *ip_buf; 466 uint16_t eth_type; 467 uint8_t ipproto; 468 boolean_t is_vlan = B_FALSE; 469 size_t eth_hdr_size; 470 471 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 472 473 /* 474 * Caller should zero out the headers first. 475 */ 476 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 477 478 if (fill_len) { 479 NXGE_DEBUG_MSG((NULL, TX_CTL, 480 "==> nxge_fill_tx_hdr: pkt_len %d " 481 "npads %d", pkt_len, npads)); 482 tmp = (uint64_t)pkt_len; 483 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 484 goto fill_tx_header_done; 485 } 486 487 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 488 489 /* 490 * mp is the original data packet (does not include the 491 * Neptune transmit header). 492 */ 493 nmp = mp; 494 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 495 "mp $%p b_rptr $%p len %d", 496 mp, nmp->b_rptr, MBLKL(nmp))); 497 /* copy ether_header from mblk to hdrs_buf */ 498 cursor = &hdrs_buf[0]; 499 tmp = sizeof (struct ether_vlan_header); 500 while ((nmp != NULL) && (tmp > 0)) { 501 size_t buflen; 502 mblk_len = MBLKL(nmp); 503 buflen = min((size_t)tmp, mblk_len); 504 bcopy(nmp->b_rptr, cursor, buflen); 505 cursor += buflen; 506 tmp -= buflen; 507 nmp = nmp->b_cont; 508 } 509 510 nmp = mp; 511 mblk_len = MBLKL(nmp); 512 ip_buf = NULL; 513 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 514 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 515 "ether type 0x%x", eth_type, hdrp->value)); 516 517 if (eth_type < ETHERMTU) { 518 tmp = 1ull; 519 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 520 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 521 "value 0x%llx", hdrp->value)); 522 if (*(hdrs_buf + sizeof (struct ether_header)) 523 == LLC_SNAP_SAP) { 524 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 525 sizeof (struct ether_header) + 6))); 526 NXGE_DEBUG_MSG((NULL, TX_CTL, 527 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 528 eth_type)); 529 } else { 530 goto fill_tx_header_done; 531 } 532 } else if (eth_type == VLAN_ETHERTYPE) { 533 tmp = 1ull; 534 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 535 536 eth_type = ntohs(((struct ether_vlan_header *) 537 hdrs_buf)->ether_type); 538 is_vlan = B_TRUE; 539 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 540 "value 0x%llx", hdrp->value)); 541 } 542 543 if (!is_vlan) { 544 eth_hdr_size = sizeof (struct ether_header); 545 } else { 546 eth_hdr_size = sizeof (struct ether_vlan_header); 547 } 548 549 switch (eth_type) { 550 case ETHERTYPE_IP: 551 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 552 ip_buf = nmp->b_rptr + eth_hdr_size; 553 mblk_len -= eth_hdr_size; 554 iph_len = ((*ip_buf) & 0x0f); 555 if (mblk_len > (iph_len + sizeof (uint32_t))) { 556 ip_buf = nmp->b_rptr; 557 ip_buf += eth_hdr_size; 558 } else { 559 ip_buf = NULL; 560 } 561 562 } 563 if 
(ip_buf == NULL) { 564 hdrs_size = 0; 565 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 566 while ((nmp) && (hdrs_size < 567 sizeof (hdrs_buf))) { 568 mblk_len = (size_t)nmp->b_wptr - 569 (size_t)nmp->b_rptr; 570 if (mblk_len >= 571 (sizeof (hdrs_buf) - hdrs_size)) 572 mblk_len = sizeof (hdrs_buf) - 573 hdrs_size; 574 bcopy(nmp->b_rptr, 575 &hdrs_buf[hdrs_size], mblk_len); 576 hdrs_size += mblk_len; 577 nmp = nmp->b_cont; 578 } 579 ip_buf = hdrs_buf; 580 ip_buf += eth_hdr_size; 581 iph_len = ((*ip_buf) & 0x0f); 582 } 583 584 ipproto = ip_buf[9]; 585 586 tmp = (uint64_t)iph_len; 587 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 588 tmp = (uint64_t)(eth_hdr_size >> 1); 589 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 590 591 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 592 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 593 "tmp 0x%x", 594 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 595 ipproto, tmp)); 596 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 597 "value 0x%llx", hdrp->value)); 598 599 break; 600 601 case ETHERTYPE_IPV6: 602 hdrs_size = 0; 603 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 604 while ((nmp) && (hdrs_size < 605 sizeof (hdrs_buf))) { 606 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 607 if (mblk_len >= 608 (sizeof (hdrs_buf) - hdrs_size)) 609 mblk_len = sizeof (hdrs_buf) - 610 hdrs_size; 611 bcopy(nmp->b_rptr, 612 &hdrs_buf[hdrs_size], mblk_len); 613 hdrs_size += mblk_len; 614 nmp = nmp->b_cont; 615 } 616 ip_buf = hdrs_buf; 617 ip_buf += eth_hdr_size; 618 619 tmp = 1ull; 620 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 621 622 tmp = (eth_hdr_size >> 1); 623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 624 625 /* byte 6 is the next header protocol */ 626 ipproto = ip_buf[6]; 627 628 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 629 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 630 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 631 ipproto)); 632 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 633 "value 0x%llx", hdrp->value)); 634 635 break; 636 637 default: 638 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 639 goto fill_tx_header_done; 640 } 641 642 switch (ipproto) { 643 case IPPROTO_TCP: 644 NXGE_DEBUG_MSG((NULL, TX_CTL, 645 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 646 if (l4_cksum) { 647 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 648 hdrp->value |= 649 (((uint64_t)(start_offset >> 1)) << 650 TX_PKT_HEADER_L4START_SHIFT); 651 hdrp->value |= 652 (((uint64_t)(stuff_offset >> 1)) << 653 TX_PKT_HEADER_L4STUFF_SHIFT); 654 655 NXGE_DEBUG_MSG((NULL, TX_CTL, 656 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 657 "value 0x%llx", hdrp->value)); 658 } 659 660 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 661 "value 0x%llx", hdrp->value)); 662 break; 663 664 case IPPROTO_UDP: 665 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 666 if (l4_cksum) { 667 if (!nxge_cksum_offload) { 668 uint16_t *up; 669 uint16_t cksum; 670 t_uscalar_t stuff_len; 671 672 /* 673 * The checksum field has the 674 * partial checksum. 675 * IP_CSUM() macro calls ip_cksum() which 676 * can add in the partial checksum. 
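 * In other words, because nxge_cksum_offload is zero on this path the
 * hardware is not asked to insert the UDP checksum (the
 * TX_CKSUM_EN_PKT_TYPE_UDP bit is cleared below); the driver finishes
 * the checksum itself by folding the payload into the stack's partial
 * (pseudo-header) sum with IP_CSUM() and writing the result back at
 * stuff_offset.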
677 */ 678 cksum = IP_CSUM(mp, start_offset, 0); 679 stuff_len = stuff_offset; 680 nmp = mp; 681 mblk_len = MBLKL(nmp); 682 while ((nmp != NULL) && 683 (mblk_len < stuff_len)) { 684 stuff_len -= mblk_len; 685 nmp = nmp->b_cont; 686 } 687 ASSERT(nmp); 688 up = (uint16_t *)(nmp->b_rptr + stuff_len); 689 690 *up = cksum; 691 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 692 NXGE_DEBUG_MSG((NULL, TX_CTL, 693 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 694 "use sw cksum " 695 "write to $%p cksum 0x%x content up 0x%x", 696 stuff_len, 697 up, 698 cksum, 699 *up)); 700 } else { 701 /* Hardware will compute the full checksum */ 702 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 703 hdrp->value |= 704 (((uint64_t)(start_offset >> 1)) << 705 TX_PKT_HEADER_L4START_SHIFT); 706 hdrp->value |= 707 (((uint64_t)(stuff_offset >> 1)) << 708 TX_PKT_HEADER_L4STUFF_SHIFT); 709 710 NXGE_DEBUG_MSG((NULL, TX_CTL, 711 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 712 " use partial checksum " 713 "cksum 0x%x ", 714 "value 0x%llx", 715 stuff_offset, 716 IP_CSUM(mp, start_offset, 0), 717 hdrp->value)); 718 } 719 } 720 721 NXGE_DEBUG_MSG((NULL, TX_CTL, 722 "==> nxge_tx_pkt_hdr_init: UDP" 723 "value 0x%llx", hdrp->value)); 724 break; 725 726 default: 727 goto fill_tx_header_done; 728 } 729 730 fill_tx_header_done: 731 NXGE_DEBUG_MSG((NULL, TX_CTL, 732 "==> nxge_fill_tx_hdr: pkt_len %d " 733 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 734 735 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 736 } 737 738 /*ARGSUSED*/ 739 p_mblk_t 740 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 741 { 742 p_mblk_t newmp = NULL; 743 744 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 745 NXGE_DEBUG_MSG((NULL, TX_CTL, 746 "<== nxge_tx_pkt_header_reserve: allocb failed")); 747 return (NULL); 748 } 749 750 NXGE_DEBUG_MSG((NULL, TX_CTL, 751 "==> nxge_tx_pkt_header_reserve: get new mp")); 752 DB_TYPE(newmp) = M_DATA; 753 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 754 linkb(newmp, mp); 755 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 756 757 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 758 "b_rptr $%p b_wptr $%p", 759 newmp->b_rptr, newmp->b_wptr)); 760 761 NXGE_DEBUG_MSG((NULL, TX_CTL, 762 "<== nxge_tx_pkt_header_reserve: use new mp")); 763 764 return (newmp); 765 } 766 767 int 768 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 769 { 770 uint_t nmblks; 771 ssize_t len; 772 uint_t pkt_len; 773 p_mblk_t nmp, bmp, tmp; 774 uint8_t *b_wptr; 775 776 NXGE_DEBUG_MSG((NULL, TX_CTL, 777 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 778 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 779 780 nmp = mp; 781 bmp = mp; 782 nmblks = 0; 783 pkt_len = 0; 784 *tot_xfer_len_p = 0; 785 786 while (nmp) { 787 len = MBLKL(nmp); 788 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 789 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 790 len, pkt_len, nmblks, 791 *tot_xfer_len_p)); 792 793 if (len <= 0) { 794 bmp = nmp; 795 nmp = nmp->b_cont; 796 NXGE_DEBUG_MSG((NULL, TX_CTL, 797 "==> nxge_tx_pkt_nmblocks: " 798 "len (0) pkt_len %d nmblks %d", 799 pkt_len, nmblks)); 800 continue; 801 } 802 803 *tot_xfer_len_p += len; 804 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 805 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 806 len, pkt_len, nmblks, 807 *tot_xfer_len_p)); 808 809 if (len < nxge_bcopy_thresh) { 810 NXGE_DEBUG_MSG((NULL, TX_CTL, 811 "==> nxge_tx_pkt_nmblocks: " 812 "len %d (< thresh) pkt_len %d nmblks %d", 813 len, pkt_len, nmblks)); 814 if (pkt_len == 0) 815 nmblks++; 816 
pkt_len += len; 817 if (pkt_len >= nxge_bcopy_thresh) { 818 pkt_len = 0; 819 len = 0; 820 nmp = bmp; 821 } 822 } else { 823 NXGE_DEBUG_MSG((NULL, TX_CTL, 824 "==> nxge_tx_pkt_nmblocks: " 825 "len %d (> thresh) pkt_len %d nmblks %d", 826 len, pkt_len, nmblks)); 827 pkt_len = 0; 828 nmblks++; 829 /* 830 * Hardware limits the transfer length to 4K. 831 * If len is more than 4K, we need to break 832 * it up to at most 2 more blocks. 833 */ 834 if (len > TX_MAX_TRANSFER_LENGTH) { 835 uint32_t nsegs; 836 837 nsegs = 1; 838 NXGE_DEBUG_MSG((NULL, TX_CTL, 839 "==> nxge_tx_pkt_nmblocks: " 840 "len %d pkt_len %d nmblks %d nsegs %d", 841 len, pkt_len, nmblks, nsegs)); 842 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 843 ++nsegs; 844 } 845 do { 846 b_wptr = nmp->b_rptr + 847 TX_MAX_TRANSFER_LENGTH; 848 nmp->b_wptr = b_wptr; 849 if ((tmp = dupb(nmp)) == NULL) { 850 return (0); 851 } 852 tmp->b_rptr = b_wptr; 853 tmp->b_wptr = nmp->b_wptr; 854 tmp->b_cont = nmp->b_cont; 855 nmp->b_cont = tmp; 856 nmblks++; 857 if (--nsegs) { 858 nmp = tmp; 859 } 860 } while (nsegs); 861 nmp = tmp; 862 } 863 } 864 865 /* 866 * Hardware limits the transmit gather pointers to 15. 867 */ 868 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 869 TX_MAX_GATHER_POINTERS) { 870 NXGE_DEBUG_MSG((NULL, TX_CTL, 871 "==> nxge_tx_pkt_nmblocks: pull msg - " 872 "len %d pkt_len %d nmblks %d", 873 len, pkt_len, nmblks)); 874 /* Pull all message blocks from b_cont */ 875 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 876 return (0); 877 } 878 freemsg(nmp->b_cont); 879 nmp->b_cont = tmp; 880 pkt_len = 0; 881 } 882 bmp = nmp; 883 nmp = nmp->b_cont; 884 } 885 886 NXGE_DEBUG_MSG((NULL, TX_CTL, 887 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 888 "nmblks %d len %d tot_xfer_len %d", 889 mp->b_rptr, mp->b_wptr, nmblks, 890 MBLKL(mp), *tot_xfer_len_p)); 891 892 return (nmblks); 893 } 894 895 boolean_t 896 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 897 { 898 boolean_t status = B_TRUE; 899 p_nxge_dma_common_t tx_desc_dma_p; 900 nxge_dma_common_t desc_area; 901 p_tx_desc_t tx_desc_ring_vp; 902 p_tx_desc_t tx_desc_p; 903 p_tx_desc_t tx_desc_pp; 904 tx_desc_t r_tx_desc; 905 p_tx_msg_t tx_msg_ring; 906 p_tx_msg_t tx_msg_p; 907 npi_handle_t handle; 908 tx_ring_hdl_t tx_head; 909 uint32_t pkt_len; 910 uint_t tx_rd_index; 911 uint16_t head_index, tail_index; 912 uint8_t tdc; 913 boolean_t head_wrap, tail_wrap; 914 p_nxge_tx_ring_stats_t tdc_stats; 915 int rc; 916 917 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 918 919 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 920 (nmblks != 0)); 921 NXGE_DEBUG_MSG((nxgep, TX_CTL, 922 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 923 tx_ring_p->descs_pending, nxge_reclaim_pending, 924 nmblks)); 925 if (!status) { 926 tx_desc_dma_p = &tx_ring_p->tdc_desc; 927 desc_area = tx_ring_p->tdc_desc; 928 handle = NXGE_DEV_NPI_HANDLE(nxgep); 929 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 930 tx_desc_ring_vp = 931 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 932 tx_rd_index = tx_ring_p->rd_index; 933 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 934 tx_msg_ring = tx_ring_p->tx_msg_ring; 935 tx_msg_p = &tx_msg_ring[tx_rd_index]; 936 tdc = tx_ring_p->tdc; 937 tdc_stats = tx_ring_p->tdc_stats; 938 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 939 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 940 } 941 942 tail_index = tx_ring_p->wr_index; 943 tail_wrap = tx_ring_p->wr_index_wrap; 944 945 NXGE_DEBUG_MSG((nxgep, TX_CTL, 946 "==> 
nxge_txdma_reclaim: tdc %d tx_rd_index %d " 947 "tail_index %d tail_wrap %d " 948 "tx_desc_p $%p ($%p) ", 949 tdc, tx_rd_index, tail_index, tail_wrap, 950 tx_desc_p, (*(uint64_t *)tx_desc_p))); 951 /* 952 * Read the hardware maintained transmit head 953 * and wrap around bit. 954 */ 955 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 956 head_index = tx_head.bits.ldw.head; 957 head_wrap = tx_head.bits.ldw.wrap; 958 NXGE_DEBUG_MSG((nxgep, TX_CTL, 959 "==> nxge_txdma_reclaim: " 960 "tx_rd_index %d tail %d tail_wrap %d " 961 "head %d wrap %d", 962 tx_rd_index, tail_index, tail_wrap, 963 head_index, head_wrap)); 964 965 if (head_index == tail_index) { 966 if (TXDMA_RING_EMPTY(head_index, head_wrap, 967 tail_index, tail_wrap) && 968 (head_index == tx_rd_index)) { 969 NXGE_DEBUG_MSG((nxgep, TX_CTL, 970 "==> nxge_txdma_reclaim: EMPTY")); 971 return (B_TRUE); 972 } 973 974 NXGE_DEBUG_MSG((nxgep, TX_CTL, 975 "==> nxge_txdma_reclaim: Checking " 976 "if ring full")); 977 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 978 tail_wrap)) { 979 NXGE_DEBUG_MSG((nxgep, TX_CTL, 980 "==> nxge_txdma_reclaim: full")); 981 return (B_FALSE); 982 } 983 } 984 985 NXGE_DEBUG_MSG((nxgep, TX_CTL, 986 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 987 988 tx_desc_pp = &r_tx_desc; 989 while ((tx_rd_index != head_index) && 990 (tx_ring_p->descs_pending != 0)) { 991 992 NXGE_DEBUG_MSG((nxgep, TX_CTL, 993 "==> nxge_txdma_reclaim: Checking if pending")); 994 995 NXGE_DEBUG_MSG((nxgep, TX_CTL, 996 "==> nxge_txdma_reclaim: " 997 "descs_pending %d ", 998 tx_ring_p->descs_pending)); 999 1000 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1001 "==> nxge_txdma_reclaim: " 1002 "(tx_rd_index %d head_index %d " 1003 "(tx_desc_p $%p)", 1004 tx_rd_index, head_index, 1005 tx_desc_p)); 1006 1007 tx_desc_pp->value = tx_desc_p->value; 1008 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1009 "==> nxge_txdma_reclaim: " 1010 "(tx_rd_index %d head_index %d " 1011 "tx_desc_p $%p (desc value 0x%llx) ", 1012 tx_rd_index, head_index, 1013 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 1014 1015 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1016 "==> nxge_txdma_reclaim: dump desc:")); 1017 1018 pkt_len = tx_desc_pp->bits.hdw.tr_len; 1019 tdc_stats->obytes += pkt_len; 1020 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 1021 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1022 "==> nxge_txdma_reclaim: pkt_len %d " 1023 "tdc channel %d opackets %d", 1024 pkt_len, 1025 tdc, 1026 tdc_stats->opackets)); 1027 1028 if (tx_msg_p->flags.dma_type == USE_DVMA) { 1029 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1030 "tx_desc_p = $%p " 1031 "tx_desc_pp = $%p " 1032 "index = %d", 1033 tx_desc_p, 1034 tx_desc_pp, 1035 tx_ring_p->rd_index)); 1036 (void) dvma_unload(tx_msg_p->dvma_handle, 1037 0, -1); 1038 tx_msg_p->dvma_handle = NULL; 1039 if (tx_ring_p->dvma_wr_index == 1040 tx_ring_p->dvma_wrap_mask) { 1041 tx_ring_p->dvma_wr_index = 0; 1042 } else { 1043 tx_ring_p->dvma_wr_index++; 1044 } 1045 tx_ring_p->dvma_pending--; 1046 } else if (tx_msg_p->flags.dma_type == 1047 USE_DMA) { 1048 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1049 "==> nxge_txdma_reclaim: " 1050 "USE DMA")); 1051 if (rc = ddi_dma_unbind_handle 1052 (tx_msg_p->dma_handle)) { 1053 cmn_err(CE_WARN, "!nxge_reclaim: " 1054 "ddi_dma_unbind_handle " 1055 "failed. status %d", rc); 1056 } 1057 } 1058 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1059 "==> nxge_txdma_reclaim: count packets")); 1060 /* 1061 * count a chained packet only once. 
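 * (tx_message is attached to only one descriptor of a chain, so the
 * freemsg() below runs once per packet even when that packet used
 * several descriptors.)  After the free, rd_index is advanced modulo
 * the ring size; assuming tx_wrap_mask is ring_size - 1, a 4-entry
 * ring steps 2 -> 3 -> 0 -> 1.  The wrap bits read from TX_RING_HDL
 * are what let TXDMA_RING_EMPTY()/TXDMA_RING_FULL() tell a completely
 * full ring from an empty one when head == tail.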
1062 */ 1063 if (tx_msg_p->tx_message != NULL) { 1064 freemsg(tx_msg_p->tx_message); 1065 tx_msg_p->tx_message = NULL; 1066 } 1067 1068 tx_msg_p->flags.dma_type = USE_NONE; 1069 tx_rd_index = tx_ring_p->rd_index; 1070 tx_rd_index = (tx_rd_index + 1) & 1071 tx_ring_p->tx_wrap_mask; 1072 tx_ring_p->rd_index = tx_rd_index; 1073 tx_ring_p->descs_pending--; 1074 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 1075 tx_msg_p = &tx_msg_ring[tx_rd_index]; 1076 } 1077 1078 status = (nmblks <= ((int)tx_ring_p->tx_ring_size - 1079 (int)tx_ring_p->descs_pending - TX_FULL_MARK)); 1080 if (status) { 1081 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 1082 } 1083 } else { 1084 status = (nmblks <= ((int)tx_ring_p->tx_ring_size - 1085 (int)tx_ring_p->descs_pending - TX_FULL_MARK)); 1086 } 1087 1088 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1089 "<== nxge_txdma_reclaim status = 0x%08x", status)); 1090 1091 return (status); 1092 } 1093 1094 /* 1095 * nxge_tx_intr 1096 * 1097 * Process a TDC interrupt 1098 * 1099 * Arguments: 1100 * arg1 A Logical Device state Vector (LSV) data structure. 1101 * arg2 nxge_t * 1102 * 1103 * Notes: 1104 * 1105 * NPI/NXGE function calls: 1106 * npi_txdma_control_status() 1107 * npi_intr_ldg_mgmt_set() 1108 * 1109 * nxge_tx_err_evnts() 1110 * nxge_txdma_reclaim() 1111 * 1112 * Registers accessed: 1113 * TX_CS DMC+0x40028 Transmit Control And Status 1114 * PIO_LDSV 1115 * 1116 * Context: 1117 * Any domain 1118 */ 1119 uint_t 1120 nxge_tx_intr(void *arg1, void *arg2) 1121 { 1122 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1123 p_nxge_t nxgep = (p_nxge_t)arg2; 1124 p_nxge_ldg_t ldgp; 1125 uint8_t channel; 1126 uint32_t vindex; 1127 npi_handle_t handle; 1128 tx_cs_t cs; 1129 p_tx_ring_t *tx_rings; 1130 p_tx_ring_t tx_ring_p; 1131 npi_status_t rs = NPI_SUCCESS; 1132 uint_t serviced = DDI_INTR_UNCLAIMED; 1133 nxge_status_t status = NXGE_OK; 1134 1135 if (ldvp == NULL) { 1136 NXGE_DEBUG_MSG((NULL, INT_CTL, 1137 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 1138 nxgep, ldvp)); 1139 return (DDI_INTR_UNCLAIMED); 1140 } 1141 1142 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1143 nxgep = ldvp->nxgep; 1144 } 1145 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1146 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 1147 nxgep, ldvp)); 1148 1149 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1150 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1151 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1152 "<== nxge_tx_intr: interface not started or intialized")); 1153 return (DDI_INTR_CLAIMED); 1154 } 1155 1156 /* 1157 * This interrupt handler is for a specific 1158 * transmit dma channel. 1159 */ 1160 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1161 /* Get the control and status for this channel. 
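 * Reading TX_CS tells us why this channel interrupted.  The mk (mark)
 * bit indicates that a transmitted packet whose descriptor requested a
 * mark has completed; that is the cue to run the reclaim path via
 * nxge_tx_ring_task() below.  Any error bits are handled afterwards by
 * nxge_tx_err_evnts().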
*/ 1162 channel = ldvp->channel; 1163 ldgp = ldvp->ldgp; 1164 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1165 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 1166 "channel %d", 1167 nxgep, ldvp, channel)); 1168 1169 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 1170 vindex = ldvp->vdma_index; 1171 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1172 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 1173 channel, vindex, rs)); 1174 if (!rs && cs.bits.ldw.mk) { 1175 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1176 "==> nxge_tx_intr:channel %d ring index %d " 1177 "status 0x%08x (mk bit set)", 1178 channel, vindex, rs)); 1179 tx_rings = nxgep->tx_rings->rings; 1180 tx_ring_p = tx_rings[vindex]; 1181 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1182 "==> nxge_tx_intr:channel %d ring index %d " 1183 "status 0x%08x (mk bit set, calling reclaim)", 1184 channel, vindex, rs)); 1185 1186 nxge_tx_ring_task((void *)tx_ring_p); 1187 } 1188 1189 /* 1190 * Process other transmit control and status. 1191 * Check the ldv state. 1192 */ 1193 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1194 /* 1195 * Rearm this logical group if this is a single device 1196 * group. 1197 */ 1198 if (ldgp->nldvs == 1) { 1199 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1200 "==> nxge_tx_intr: rearm")); 1201 if (status == NXGE_OK) { 1202 if (isLDOMguest(nxgep)) { 1203 nxge_hio_ldgimgn(nxgep, ldgp); 1204 } else { 1205 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 1206 B_TRUE, ldgp->ldg_timer); 1207 } 1208 } 1209 } 1210 1211 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 1212 serviced = DDI_INTR_CLAIMED; 1213 return (serviced); 1214 } 1215 1216 void 1217 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 1218 { 1219 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 1220 1221 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1222 1223 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 1224 } 1225 1226 void 1227 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 1228 { 1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 1230 1231 (void) nxge_txdma_stop(nxgep); 1232 1233 (void) nxge_fixup_txdma_rings(nxgep); 1234 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1235 (void) nxge_tx_mac_enable(nxgep); 1236 (void) nxge_txdma_hw_kick(nxgep); 1237 1238 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 1239 } 1240 1241 npi_status_t 1242 nxge_txdma_channel_disable( 1243 nxge_t *nxge, 1244 int channel) 1245 { 1246 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1247 npi_status_t rs; 1248 tdmc_intr_dbg_t intr_dbg; 1249 1250 /* 1251 * Stop the dma channel and wait for the stop-done. 1252 * If the stop-done bit is not present, then force 1253 * an error so TXC will stop. 1254 * All channels bound to this port need to be stopped 1255 * and reset after injecting an interrupt error. 
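 * The injection works by setting the nack_pref bit in TDMC_INTR_DBG,
 * which forces a fatal channel error and therefore gets the channel to
 * the stopped state even when a plain disable did not complete.  A
 * condensed sketch of the pattern used here and in
 * nxge_txdma_stop_inj_err() (illustrative only):
 *
 *	if (npi_txdma_channel_disable(handle, channel) != NPI_SUCCESS) {
 *		intr_dbg.value = 0;
 *		intr_dbg.bits.ldw.nack_pref = 1;
 *		(void) npi_txdma_inj_int_error_set(handle, channel,
 *		    &intr_dbg);
 *		(void) npi_txdma_channel_disable(handle, channel);
 *	}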
1256 */ 1257 rs = npi_txdma_channel_disable(handle, channel); 1258 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1259 "==> nxge_txdma_channel_disable(%d) " 1260 "rs 0x%x", channel, rs)); 1261 if (rs != NPI_SUCCESS) { 1262 /* Inject any error */ 1263 intr_dbg.value = 0; 1264 intr_dbg.bits.ldw.nack_pref = 1; 1265 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1266 "==> nxge_txdma_hw_mode: " 1267 "channel %d (stop failed 0x%x) " 1268 "(inject err)", rs, channel)); 1269 (void) npi_txdma_inj_int_error_set( 1270 handle, channel, &intr_dbg); 1271 rs = npi_txdma_channel_disable(handle, channel); 1272 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1273 "==> nxge_txdma_hw_mode: " 1274 "channel %d (stop again 0x%x) " 1275 "(after inject err)", 1276 rs, channel)); 1277 } 1278 1279 return (rs); 1280 } 1281 1282 /* 1283 * nxge_txdma_hw_mode 1284 * 1285 * Toggle all TDCs on (enable) or off (disable). 1286 * 1287 * Arguments: 1288 * nxgep 1289 * enable Enable or disable a TDC. 1290 * 1291 * Notes: 1292 * 1293 * NPI/NXGE function calls: 1294 * npi_txdma_channel_enable(TX_CS) 1295 * npi_txdma_channel_disable(TX_CS) 1296 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1297 * 1298 * Registers accessed: 1299 * TX_CS DMC+0x40028 Transmit Control And Status 1300 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1301 * 1302 * Context: 1303 * Any domain 1304 */ 1305 nxge_status_t 1306 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1307 { 1308 nxge_grp_set_t *set = &nxgep->tx_set; 1309 1310 npi_handle_t handle; 1311 nxge_status_t status; 1312 npi_status_t rs; 1313 int tdc; 1314 1315 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1316 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1317 1318 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1319 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1320 "<== nxge_txdma_mode: not initialized")); 1321 return (NXGE_ERROR); 1322 } 1323 1324 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1325 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1326 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1327 return (NXGE_ERROR); 1328 } 1329 1330 /* Enable or disable all of the TDCs owned by us. */ 1331 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1332 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1333 if ((1 << tdc) & set->owned.map) { 1334 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1335 if (ring) { 1336 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1337 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1338 if (enable) { 1339 rs = npi_txdma_channel_enable 1340 (handle, tdc); 1341 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1342 "==> nxge_txdma_hw_mode: " 1343 "channel %d (enable) rs 0x%x", 1344 tdc, rs)); 1345 } else { 1346 rs = nxge_txdma_channel_disable 1347 (nxgep, tdc); 1348 } 1349 } 1350 } 1351 } 1352 1353 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1354 1355 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1356 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1357 1358 return (status); 1359 } 1360 1361 void 1362 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1363 { 1364 npi_handle_t handle; 1365 1366 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1367 "==> nxge_txdma_enable_channel: channel %d", channel)); 1368 1369 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1370 /* enable the transmit dma channels */ 1371 (void) npi_txdma_channel_enable(handle, channel); 1372 1373 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1374 } 1375 1376 void 1377 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1378 { 1379 npi_handle_t handle; 1380 1381 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1382 "==> nxge_txdma_disable_channel: channel %d", channel)); 1383 1384 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1385 /* stop the transmit dma channels */ 1386 (void) npi_txdma_channel_disable(handle, channel); 1387 1388 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1389 } 1390 1391 /* 1392 * nxge_txdma_stop_inj_err 1393 * 1394 * Stop a TDC. If at first we don't succeed, inject an error. 1395 * 1396 * Arguments: 1397 * nxgep 1398 * channel The channel to stop. 1399 * 1400 * Notes: 1401 * 1402 * NPI/NXGE function calls: 1403 * npi_txdma_channel_disable() 1404 * npi_txdma_inj_int_error_set() 1405 * #if defined(NXGE_DEBUG) 1406 * nxge_txdma_regs_dump_channels(nxgep); 1407 * #endif 1408 * 1409 * Registers accessed: 1410 * TX_CS DMC+0x40028 Transmit Control And Status 1411 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1412 * 1413 * Context: 1414 * Any domain 1415 */ 1416 int 1417 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1418 { 1419 npi_handle_t handle; 1420 tdmc_intr_dbg_t intr_dbg; 1421 int status; 1422 npi_status_t rs = NPI_SUCCESS; 1423 1424 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1425 /* 1426 * Stop the dma channel waits for the stop done. 1427 * If the stop done bit is not set, then create 1428 * an error. 1429 */ 1430 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1431 rs = npi_txdma_channel_disable(handle, channel); 1432 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1433 if (status == NXGE_OK) { 1434 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1435 "<== nxge_txdma_stop_inj_err (channel %d): " 1436 "stopped OK", channel)); 1437 return (status); 1438 } 1439 1440 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1441 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1442 "injecting error", channel, rs)); 1443 /* Inject any error */ 1444 intr_dbg.value = 0; 1445 intr_dbg.bits.ldw.nack_pref = 1; 1446 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1447 1448 /* Stop done bit will be set as a result of error injection */ 1449 rs = npi_txdma_channel_disable(handle, channel); 1450 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1451 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1452 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1453 "<== nxge_txdma_stop_inj_err (channel %d): " 1454 "stopped OK ", channel)); 1455 return (status); 1456 } 1457 1458 #if defined(NXGE_DEBUG) 1459 nxge_txdma_regs_dump_channels(nxgep); 1460 #endif 1461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1462 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1463 " (injected error but still not stopped)", channel, rs)); 1464 1465 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1466 return (status); 1467 } 1468 1469 /*ARGSUSED*/ 1470 void 1471 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1472 { 1473 nxge_grp_set_t *set = &nxgep->tx_set; 1474 int tdc; 1475 1476 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1477 1478 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1479 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1480 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 1481 return; 1482 } 1483 1484 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1485 if ((1 << tdc) & set->owned.map) { 1486 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1487 if (ring) { 1488 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1489 "==> nxge_fixup_txdma_rings: channel %d", 1490 tdc)); 1491 nxge_txdma_fixup_channel(nxgep, ring, tdc); 1492 } 1493 } 1494 } 1495 1496 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1497 } 1498 1499 /*ARGSUSED*/ 1500 void 1501 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1502 { 1503 p_tx_ring_t ring_p; 1504 1505 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1506 ring_p = nxge_txdma_get_ring(nxgep, channel); 1507 if (ring_p == NULL) { 1508 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1509 return; 1510 } 1511 1512 if (ring_p->tdc != channel) { 1513 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1514 "<== nxge_txdma_fix_channel: channel not matched " 1515 "ring tdc %d passed channel", 1516 ring_p->tdc, channel)); 1517 return; 1518 } 1519 1520 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1521 1522 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1523 } 1524 1525 /*ARGSUSED*/ 1526 void 1527 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1528 { 1529 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1530 1531 if (ring_p == NULL) { 1532 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1533 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1534 return; 1535 } 1536 1537 if (ring_p->tdc != channel) { 1538 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1539 "<== nxge_txdma_fixup_channel: channel not matched " 1540 "ring tdc %d passed channel", 1541 ring_p->tdc, channel)); 1542 return; 1543 } 1544 1545 MUTEX_ENTER(&ring_p->lock); 1546 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1547 ring_p->rd_index = 0; 1548 ring_p->wr_index = 0; 1549 ring_p->ring_head.value = 0; 1550 ring_p->ring_kick_tail.value = 0; 1551 ring_p->descs_pending = 0; 1552 MUTEX_EXIT(&ring_p->lock); 1553 1554 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1555 } 1556 1557 /*ARGSUSED*/ 1558 void 1559 nxge_txdma_hw_kick(p_nxge_t nxgep) 1560 { 1561 nxge_grp_set_t *set = &nxgep->tx_set; 1562 int tdc; 1563 1564 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1565 1566 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1567 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1568 "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 1569 return; 1570 } 1571 1572 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1573 if ((1 << tdc) & set->owned.map) { 1574 tx_ring_t *ring = 
nxgep->tx_rings->rings[tdc]; 1575 if (ring) { 1576 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1577 "==> nxge_txdma_hw_kick: channel %d", tdc)); 1578 nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1579 } 1580 } 1581 } 1582 1583 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1584 } 1585 1586 /*ARGSUSED*/ 1587 void 1588 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1589 { 1590 p_tx_ring_t ring_p; 1591 1592 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1593 1594 ring_p = nxge_txdma_get_ring(nxgep, channel); 1595 if (ring_p == NULL) { 1596 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1597 " nxge_txdma_kick_channel")); 1598 return; 1599 } 1600 1601 if (ring_p->tdc != channel) { 1602 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1603 "<== nxge_txdma_kick_channel: channel not matched " 1604 "ring tdc %d passed channel", 1605 ring_p->tdc, channel)); 1606 return; 1607 } 1608 1609 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1610 1611 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1612 } 1613 1614 /*ARGSUSED*/ 1615 void 1616 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1617 { 1618 1619 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1620 1621 if (ring_p == NULL) { 1622 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1623 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1624 return; 1625 } 1626 1627 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1628 } 1629 1630 /* 1631 * nxge_check_tx_hang 1632 * 1633 * Check the state of all TDCs belonging to nxgep. 1634 * 1635 * Arguments: 1636 * nxgep 1637 * 1638 * Notes: 1639 * Called by nxge_hw.c:nxge_check_hw_state(). 1640 * 1641 * NPI/NXGE function calls: 1642 * 1643 * Registers accessed: 1644 * 1645 * Context: 1646 * Any domain 1647 */ 1648 /*ARGSUSED*/ 1649 void 1650 nxge_check_tx_hang(p_nxge_t nxgep) 1651 { 1652 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1653 1654 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1655 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1656 goto nxge_check_tx_hang_exit; 1657 } 1658 1659 /* 1660 * Needs inputs from hardware for regs: 1661 * head index had not moved since last timeout. 1662 * packets not transmitted or stuffed registers. 1663 */ 1664 if (nxge_txdma_hung(nxgep)) { 1665 nxge_fixup_hung_txdma_rings(nxgep); 1666 } 1667 1668 nxge_check_tx_hang_exit: 1669 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1670 } 1671 1672 /* 1673 * nxge_txdma_hung 1674 * 1675 * Reset a TDC. 1676 * 1677 * Arguments: 1678 * nxgep 1679 * channel The channel to reset. 1680 * reg_data The current TX_CS. 1681 * 1682 * Notes: 1683 * Called by nxge_check_tx_hang() 1684 * 1685 * NPI/NXGE function calls: 1686 * nxge_txdma_channel_hung() 1687 * 1688 * Registers accessed: 1689 * 1690 * Context: 1691 * Any domain 1692 */ 1693 int 1694 nxge_txdma_hung(p_nxge_t nxgep) 1695 { 1696 nxge_grp_set_t *set = &nxgep->tx_set; 1697 int tdc; 1698 boolean_t shared; 1699 1700 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1701 1702 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1703 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1704 "<== nxge_txdma_hung: NULL ring pointer(s)")); 1705 return (B_FALSE); 1706 } 1707 1708 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1709 /* 1710 * Grab the shared state of the TDC. 
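 * In a service domain a TDC may be loaned to a guest through Hybrid
 * I/O, in which case tdc_is_shared[tdc] is set and the guest, not this
 * driver instance, is driving the channel.  Shared channels are
 * skipped by the hang check below, since their head/tail state is not
 * ours to judge.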
1711 */
1712 if (isLDOMservice(nxgep)) {
1713 nxge_hio_data_t *nhd =
1714 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1715
1716 MUTEX_ENTER(&nhd->lock);
1717 shared = nxgep->tdc_is_shared[tdc];
1718 MUTEX_EXIT(&nhd->lock);
1719 } else {
1720 shared = B_FALSE;
1721 }
1722
1723 /*
1724 * Now check the channel, but only if we own it and it is not shared.
1725 */
1726 if (((1 << tdc) & set->owned.map) && !shared) {
1727 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1728 if (ring) {
1729 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1730 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1731 "==> nxge_txdma_hung: TDC %d hung",
1732 tdc));
1733 return (B_TRUE);
1734 }
1735 }
1736 }
1737 }
1738
1739 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1740
1741 return (B_FALSE);
1742 }
1743
1744 /*
1745 * nxge_txdma_channel_hung
1746 *
1747 * Check whether a TDC appears hung (its ring is still full after a reclaim).
1748 *
1749 * Arguments:
1750 * nxgep
1751 * ring <channel>'s ring.
1752 * channel The channel to check.
1753 *
1754 * Notes:
1755 * Called by nxge_txdma.c:nxge_txdma_hung()
1756 *
1757 * NPI/NXGE function calls:
1758 * npi_txdma_ring_head_get()
1759 *
1760 * Registers accessed:
1761 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1762 *
1763 * Context:
1764 * Any domain
1765 */
1766 int
1767 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1768 {
1769 uint16_t head_index, tail_index;
1770 boolean_t head_wrap, tail_wrap;
1771 npi_handle_t handle;
1772 tx_ring_hdl_t tx_head;
1773 uint_t tx_rd_index;
1774
1775 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1776
1777 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1778 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1779 "==> nxge_txdma_channel_hung: channel %d", channel));
1780 MUTEX_ENTER(&tx_ring_p->lock);
1781 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1782
1783 tail_index = tx_ring_p->wr_index;
1784 tail_wrap = tx_ring_p->wr_index_wrap;
1785 tx_rd_index = tx_ring_p->rd_index;
1786 MUTEX_EXIT(&tx_ring_p->lock);
1787
1788 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1789 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1790 "tail_index %d tail_wrap %d ",
1791 channel, tx_rd_index, tail_index, tail_wrap));
1792 /*
1793 * Read the hardware maintained transmit head
1794 * and wrap around bit.
1795 */
1796 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1797 head_index = tx_head.bits.ldw.head;
1798 head_wrap = tx_head.bits.ldw.wrap;
1799 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1800 "==> nxge_txdma_channel_hung: "
1801 "tx_rd_index %d tail %d tail_wrap %d "
1802 "head %d wrap %d",
1803 tx_rd_index, tail_index, tail_wrap,
1804 head_index, head_wrap));
1805
1806 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1807 tail_index, tail_wrap) &&
1808 (head_index == tx_rd_index)) {
1809 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1810 "==> nxge_txdma_channel_hung: EMPTY"));
1811 return (B_FALSE);
1812 }
1813
1814 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1815 "==> nxge_txdma_channel_hung: Checking if ring full"));
1816 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1817 tail_wrap)) {
1818 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1819 "==> nxge_txdma_channel_hung: full"));
1820 return (B_TRUE);
1821 }
1822
1823 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1824
1825 return (B_FALSE);
1826 }
1827
1828 /*
1829 * nxge_fixup_hung_txdma_rings
1830 *
1831 * Reclaim and disable all TDCs owned by this instance; this is the recovery path used when a transmit hang has been detected.
1832 *
1833 * Arguments:
1834 * nxgep
1835 *
1836 * (Operates on every TDC in nxgep->tx_set.owned.map.)
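 *	For each such channel, nxge_txdma_fixup_hung_channel() is
 *	applied: reclaim what has completed, then disable the channel,
 *	injecting a TDMC_INTR_DBG error if the first disable does not
 *	reach the stopped state.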
1837 * 1838 * Notes: 1839 * Called by nxge_check_tx_hang() 1840 * 1841 * NPI/NXGE function calls: 1842 * npi_txdma_ring_head_get() 1843 * 1844 * Registers accessed: 1845 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1846 * 1847 * Context: 1848 * Any domain 1849 */ 1850 /*ARGSUSED*/ 1851 void 1852 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1853 { 1854 nxge_grp_set_t *set = &nxgep->tx_set; 1855 int tdc; 1856 1857 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1858 1859 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1860 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1861 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1862 return; 1863 } 1864 1865 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1866 if ((1 << tdc) & set->owned.map) { 1867 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1868 if (ring) { 1869 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1870 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1871 "==> nxge_fixup_hung_txdma_rings: TDC %d", 1872 tdc)); 1873 } 1874 } 1875 } 1876 1877 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1878 } 1879 1880 /* 1881 * nxge_txdma_fixup_hung_channel 1882 * 1883 * 'Fix' a hung TDC. 1884 * 1885 * Arguments: 1886 * nxgep 1887 * channel The channel to fix. 1888 * 1889 * Notes: 1890 * Called by nxge_fixup_hung_txdma_rings() 1891 * 1892 * 1. Reclaim the TDC. 1893 * 2. Disable the TDC. 1894 * 1895 * NPI/NXGE function calls: 1896 * nxge_txdma_reclaim() 1897 * npi_txdma_channel_disable(TX_CS) 1898 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1899 * 1900 * Registers accessed: 1901 * TX_CS DMC+0x40028 Transmit Control And Status 1902 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1903 * 1904 * Context: 1905 * Any domain 1906 */ 1907 /*ARGSUSED*/ 1908 void 1909 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1910 { 1911 p_tx_ring_t ring_p; 1912 1913 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1914 ring_p = nxge_txdma_get_ring(nxgep, channel); 1915 if (ring_p == NULL) { 1916 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1917 "<== nxge_txdma_fix_hung_channel")); 1918 return; 1919 } 1920 1921 if (ring_p->tdc != channel) { 1922 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1923 "<== nxge_txdma_fix_hung_channel: channel not matched " 1924 "ring tdc %d passed channel", 1925 ring_p->tdc, channel)); 1926 return; 1927 } 1928 1929 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1930 1931 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1932 } 1933 1934 /*ARGSUSED*/ 1935 void 1936 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1937 uint16_t channel) 1938 { 1939 npi_handle_t handle; 1940 tdmc_intr_dbg_t intr_dbg; 1941 int status = NXGE_OK; 1942 1943 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1944 1945 if (ring_p == NULL) { 1946 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1947 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1948 return; 1949 } 1950 1951 if (ring_p->tdc != channel) { 1952 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1953 "<== nxge_txdma_fixup_hung_channel: channel " 1954 "not matched " 1955 "ring tdc %d passed channel", 1956 ring_p->tdc, channel)); 1957 return; 1958 } 1959 1960 /* Reclaim descriptors */ 1961 MUTEX_ENTER(&ring_p->lock); 1962 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1963 MUTEX_EXIT(&ring_p->lock); 1964 1965 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1966 /* 1967 * Stop the dma channel waits for the stop done. 1968 * If the stop done bit is not set, then force 1969 * an error. 
1970 */ 1971 status = npi_txdma_channel_disable(handle, channel); 1972 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1973 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1974 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1975 "ring tdc %d passed channel %d", 1976 ring_p->tdc, channel)); 1977 return; 1978 } 1979 1980 /* Inject any error */ 1981 intr_dbg.value = 0; 1982 intr_dbg.bits.ldw.nack_pref = 1; 1983 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1984 1985 /* Stop done bit will be set as a result of error injection */ 1986 status = npi_txdma_channel_disable(handle, channel); 1987 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1988 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1989 "<== nxge_txdma_fixup_hung_channel: stopped again" 1990 "ring tdc %d passed channel", 1991 ring_p->tdc, channel)); 1992 return; 1993 } 1994 1995 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1996 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 1997 "ring tdc %d passed channel", 1998 ring_p->tdc, channel)); 1999 2000 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 2001 } 2002 2003 /*ARGSUSED*/ 2004 void 2005 nxge_reclaim_rings(p_nxge_t nxgep) 2006 { 2007 nxge_grp_set_t *set = &nxgep->tx_set; 2008 int tdc; 2009 2010 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 2011 2012 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2013 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2014 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2015 return; 2016 } 2017 2018 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2019 if ((1 << tdc) & set->owned.map) { 2020 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2021 if (ring) { 2022 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2023 "==> nxge_reclaim_rings: TDC %d", tdc)); 2024 MUTEX_ENTER(&ring->lock); 2025 (void) nxge_txdma_reclaim(nxgep, ring, 0); 2026 MUTEX_EXIT(&ring->lock); 2027 } 2028 } 2029 } 2030 2031 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 2032 } 2033 2034 void 2035 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 2036 { 2037 nxge_grp_set_t *set = &nxgep->tx_set; 2038 npi_handle_t handle; 2039 int tdc; 2040 2041 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 2042 2043 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2044 2045 if (!isLDOMguest(nxgep)) { 2046 (void) npi_txdma_dump_fzc_regs(handle); 2047 2048 /* Dump TXC registers. */ 2049 (void) npi_txc_dump_fzc_regs(handle); 2050 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2051 } 2052 2053 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2054 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2055 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2056 return; 2057 } 2058 2059 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2060 if ((1 << tdc) & set->owned.map) { 2061 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2062 if (ring) { 2063 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2064 "==> nxge_txdma_regs_dump_channels: " 2065 "TDC %d", tdc)); 2066 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2067 2068 /* Dump TXC registers, if able to. 
*/
2069 if (!isLDOMguest(nxgep)) {
2070 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2071 "==> nxge_txdma_regs_dump_channels:"
2072 " FZC TDC %d", tdc));
2073 (void) npi_txc_dump_tdc_fzc_regs
2074 (handle, tdc);
2075 }
2076 nxge_txdma_regs_dump(nxgep, tdc);
2077 }
2078 }
2079 }
2080
2081 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2082 }
2083
2084 void
2085 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2086 {
2087 npi_handle_t handle;
2088 tx_ring_hdl_t hdl;
2089 tx_ring_kick_t kick;
2090 tx_cs_t cs;
2091 txc_control_t control;
2092 uint32_t bitmap = 0;
2093 uint32_t burst = 0;
2094 uint32_t bytes = 0;
2095 dma_log_page_t cfg;
2096
2097 printf("\n\tfunc # %d tdc %d ",
2098 nxgep->function_num, channel);
2099 cfg.page_num = 0;
2100 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2101 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2102 printf("\n\tlog page func %d valid page 0 %d",
2103 cfg.func_num, cfg.valid);
2104 cfg.page_num = 1;
2105 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2106 printf("\n\tlog page func %d valid page 1 %d",
2107 cfg.func_num, cfg.valid);
2108
2109 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2110 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2111 printf("\n\thead value is 0x%0llx",
2112 (long long)hdl.value);
2113 printf("\n\thead index %d", hdl.bits.ldw.head);
2114 printf("\n\tkick value is 0x%0llx",
2115 (long long)kick.value);
2116 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2117
2118 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2119 printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2120 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2121
2122 (void) npi_txc_control(handle, OP_GET, &control);
2123 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2124 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2125 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2126
2127 printf("\n\tTXC port control 0x%0llx",
2128 (long long)control.value);
2129 printf("\n\tTXC port bitmap 0x%x", bitmap);
2130 printf("\n\tTXC max burst %d", burst);
2131 printf("\n\tTXC bytes xmt %d\n", bytes);
2132
2133 {
2134 ipp_status_t status;
2135
2136 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2137 #if defined(__i386)
2138 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2139 #else
2140 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2141 #endif
2142 }
2143 }
2144
2145 /*
2146 * nxge_tdc_hvio_setup
2147 *
2148 * Record the original I/O (physical) base addresses and sizes of this channel's transmit buffer area and control (descriptor/mailbox) area in the ring's hv_* fields, for use when the channel's NIU logical pages are configured.
2149 *
2150 * Arguments:
2151 * nxgep
2152 * channel The channel to map.
2153 *
2154 * Notes:
2155 *
2156 * NPI/NXGE function calls:
2157 * na
2158 *
2159 * Context:
2160 * Service domain (not called in a guest domain)
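 *
 * A rough sketch of how the two saved ranges are expected to be used
 * (the actual consumers are the logical-page setup routines; the page
 * assignment below is an assumption based on the usual buffer/control
 * split, not something this function enforces):
 *
 *	hv_tx_buf_base_ioaddr_pp / hv_tx_buf_ioaddr_size
 *		-> logical page 0 (transmit packet buffers)
 *	hv_tx_cntl_base_ioaddr_pp / hv_tx_cntl_ioaddr_size
 *		-> logical page 1 (descriptor ring and mailbox)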
2161 */ 2162 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2163 static void 2164 nxge_tdc_hvio_setup( 2165 nxge_t *nxgep, int channel) 2166 { 2167 nxge_dma_common_t *data; 2168 nxge_dma_common_t *control; 2169 tx_ring_t *ring; 2170 2171 ring = nxgep->tx_rings->rings[channel]; 2172 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2173 2174 ring->hv_set = B_FALSE; 2175 2176 ring->hv_tx_buf_base_ioaddr_pp = 2177 (uint64_t)data->orig_ioaddr_pp; 2178 ring->hv_tx_buf_ioaddr_size = 2179 (uint64_t)data->orig_alength; 2180 2181 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2182 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2183 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2184 ring->hv_tx_buf_base_ioaddr_pp, 2185 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2186 data->ioaddr_pp, data->orig_vatopa, 2187 data->orig_alength, data->orig_alength)); 2188 2189 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2190 2191 ring->hv_tx_cntl_base_ioaddr_pp = 2192 (uint64_t)control->orig_ioaddr_pp; 2193 ring->hv_tx_cntl_ioaddr_size = 2194 (uint64_t)control->orig_alength; 2195 2196 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2197 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2198 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2199 ring->hv_tx_cntl_base_ioaddr_pp, 2200 control->orig_ioaddr_pp, control->orig_vatopa, 2201 ring->hv_tx_cntl_ioaddr_size, 2202 control->orig_alength, control->orig_alength)); 2203 } 2204 #endif 2205 2206 static nxge_status_t 2207 nxge_map_txdma(p_nxge_t nxgep, int channel) 2208 { 2209 nxge_dma_common_t **pData; 2210 nxge_dma_common_t **pControl; 2211 tx_ring_t **pRing, *ring; 2212 tx_mbox_t **mailbox; 2213 uint32_t num_chunks; 2214 2215 nxge_status_t status = NXGE_OK; 2216 2217 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2218 2219 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2220 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2222 "<== nxge_map_txdma: buf not allocated")); 2223 return (NXGE_ERROR); 2224 } 2225 } 2226 2227 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2228 return (NXGE_ERROR); 2229 2230 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2231 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2232 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2233 pRing = &nxgep->tx_rings->rings[channel]; 2234 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2235 2236 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2237 "tx_rings $%p tx_desc_rings $%p", 2238 nxgep->tx_rings, nxgep->tx_rings->rings)); 2239 2240 /* 2241 * Map descriptors from the buffer pools for <channel>. 2242 */ 2243 2244 /* 2245 * Set up and prepare buffer blocks, descriptors 2246 * and mailbox. 
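 * nxge_map_txdma_channel() does this in two steps: it first maps the
 * transmit buffer chunks into a new tx_ring_t (the "buf ring"), then
 * computes the descriptor ring configuration and allocates the
 * mailbox (the "cfg ring").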
2247 */ 2248 status = nxge_map_txdma_channel(nxgep, channel, 2249 pData, pRing, num_chunks, pControl, mailbox); 2250 if (status != NXGE_OK) { 2251 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2252 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2253 "returned 0x%x", 2254 nxgep, channel, status)); 2255 return (status); 2256 } 2257 2258 ring = *pRing; 2259 2260 ring->index = (uint16_t)channel; 2261 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2262 2263 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2264 if (isLDOMguest(nxgep)) { 2265 (void) nxge_tdc_lp_conf(nxgep, channel); 2266 } else { 2267 nxge_tdc_hvio_setup(nxgep, channel); 2268 } 2269 #endif 2270 2271 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2272 "(status 0x%x channel %d)", status, channel)); 2273 2274 return (status); 2275 } 2276 2277 static nxge_status_t 2278 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2279 p_nxge_dma_common_t *dma_buf_p, 2280 p_tx_ring_t *tx_desc_p, 2281 uint32_t num_chunks, 2282 p_nxge_dma_common_t *dma_cntl_p, 2283 p_tx_mbox_t *tx_mbox_p) 2284 { 2285 int status = NXGE_OK; 2286 2287 /* 2288 * Set up and prepare buffer blocks, descriptors 2289 * and mailbox. 2290 */ 2291 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2292 "==> nxge_map_txdma_channel (channel %d)", channel)); 2293 /* 2294 * Transmit buffer blocks 2295 */ 2296 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2297 dma_buf_p, tx_desc_p, num_chunks); 2298 if (status != NXGE_OK) { 2299 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2300 "==> nxge_map_txdma_channel (channel %d): " 2301 "map buffer failed 0x%x", channel, status)); 2302 goto nxge_map_txdma_channel_exit; 2303 } 2304 2305 /* 2306 * Transmit block ring, and mailbox. 2307 */ 2308 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2309 tx_mbox_p); 2310 2311 goto nxge_map_txdma_channel_exit; 2312 2313 nxge_map_txdma_channel_fail1: 2314 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2315 "==> nxge_map_txdma_channel: unmap buf" 2316 "(status 0x%x channel %d)", 2317 status, channel)); 2318 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2319 2320 nxge_map_txdma_channel_exit: 2321 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2322 "<== nxge_map_txdma_channel: " 2323 "(status 0x%x channel %d)", 2324 status, channel)); 2325 2326 return (status); 2327 } 2328 2329 /*ARGSUSED*/ 2330 static void 2331 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2332 { 2333 tx_ring_t *ring; 2334 tx_mbox_t *mailbox; 2335 2336 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2337 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2338 /* 2339 * unmap tx block ring, and mailbox. 2340 */ 2341 ring = nxgep->tx_rings->rings[channel]; 2342 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2343 2344 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2345 2346 /* unmap buffer blocks */ 2347 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2348 2349 nxge_free_txb(nxgep, channel); 2350 2351 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2352 } 2353 2354 /* 2355 * nxge_map_txdma_channel_cfg_ring 2356 * 2357 * Map a TDC into our kernel space. 2358 * This function allocates all of the per-channel data structures. 2359 * 2360 * Arguments: 2361 * nxgep 2362 * dma_channel The channel to map. 2363 * dma_cntl_p 2364 * tx_ring_p dma_channel's transmit ring 2365 * tx_mbox_p dma_channel's mailbox 2366 * 2367 * Notes: 2368 * 2369 * NPI/NXGE function calls: 2370 * nxge_setup_dma_common() 2371 * 2372 * Registers accessed: 2373 * none. 
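 * (Only soft-state copies of TX_RNG_CFIG, TX_RING_KICK, TX_CS, the
 * event mask and the mailbox address registers are computed here;
 * they are written to the hardware later, when the channel is
 * enabled.)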
2374 * 2375 * Context: 2376 * Any domain 2377 */ 2378 /*ARGSUSED*/ 2379 static void 2380 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2381 p_nxge_dma_common_t *dma_cntl_p, 2382 p_tx_ring_t tx_ring_p, 2383 p_tx_mbox_t *tx_mbox_p) 2384 { 2385 p_tx_mbox_t mboxp; 2386 p_nxge_dma_common_t cntl_dmap; 2387 p_nxge_dma_common_t dmap; 2388 p_tx_rng_cfig_t tx_ring_cfig_p; 2389 p_tx_ring_kick_t tx_ring_kick_p; 2390 p_tx_cs_t tx_cs_p; 2391 p_tx_dma_ent_msk_t tx_evmask_p; 2392 p_txdma_mbh_t mboxh_p; 2393 p_txdma_mbl_t mboxl_p; 2394 uint64_t tx_desc_len; 2395 2396 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2397 "==> nxge_map_txdma_channel_cfg_ring")); 2398 2399 cntl_dmap = *dma_cntl_p; 2400 2401 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2402 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2403 sizeof (tx_desc_t)); 2404 /* 2405 * Zero out transmit ring descriptors. 2406 */ 2407 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2408 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2409 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2410 tx_cs_p = &(tx_ring_p->tx_cs); 2411 tx_evmask_p = &(tx_ring_p->tx_evmask); 2412 tx_ring_cfig_p->value = 0; 2413 tx_ring_kick_p->value = 0; 2414 tx_cs_p->value = 0; 2415 tx_evmask_p->value = 0; 2416 2417 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2418 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2419 dma_channel, 2420 dmap->dma_cookie.dmac_laddress)); 2421 2422 tx_ring_cfig_p->value = 0; 2423 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2424 tx_ring_cfig_p->value = 2425 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2426 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2427 2428 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2429 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2430 dma_channel, 2431 tx_ring_cfig_p->value)); 2432 2433 tx_cs_p->bits.ldw.rst = 1; 2434 2435 /* Map in mailbox */ 2436 mboxp = (p_tx_mbox_t) 2437 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2438 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2439 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2440 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2441 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2442 mboxh_p->value = mboxl_p->value = 0; 2443 2444 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2445 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2446 dmap->dma_cookie.dmac_laddress)); 2447 2448 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2449 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2450 2451 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2452 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2453 2454 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2455 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2456 dmap->dma_cookie.dmac_laddress)); 2457 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2458 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2459 "mbox $%p", 2460 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2461 tx_ring_p->page_valid.value = 0; 2462 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2463 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2464 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2465 tx_ring_p->page_hdl.value = 0; 2466 2467 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2468 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2469 2470 tx_ring_p->max_burst.value = 0; 2471 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2472 2473 *tx_mbox_p = mboxp; 2474 2475 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2476 "<== nxge_map_txdma_channel_cfg_ring")); 2477 } 2478 2479 
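/*
 * Worked example of the packing done above (illustrative numbers only,
 * not taken from any particular configuration):
 *
 *	tx_ring_size = 1024 descriptors, sizeof (tx_desc_t) = 8 bytes
 *	tx_desc_len  = 1024 >> 3 = 128	(the length field is expressed
 *					in 64-byte units, i.e. groups of
 *					8 descriptors)
 *	TX_RNG_CFIG  = (ring DMA address & TX_RNG_CFIG_ADDR_MASK) |
 *		       (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT)
 *
 * The 64-bit mailbox DMA address is likewise split in two: the high
 * bits go into the MBH register and the low bits into MBL, using the
 * TXDMA_MBH and TXDMA_MBL shift and mask constants.
 */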
/*ARGSUSED*/ 2480 static void 2481 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2482 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2483 { 2484 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2485 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2486 tx_ring_p->tdc)); 2487 2488 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2489 2490 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2491 "<== nxge_unmap_txdma_channel_cfg_ring")); 2492 } 2493 2494 /* 2495 * nxge_map_txdma_channel_buf_ring 2496 * 2497 * 2498 * Arguments: 2499 * nxgep 2500 * channel The channel to map. 2501 * dma_buf_p 2502 * tx_desc_p channel's descriptor ring 2503 * num_chunks 2504 * 2505 * Notes: 2506 * 2507 * NPI/NXGE function calls: 2508 * nxge_setup_dma_common() 2509 * 2510 * Registers accessed: 2511 * none. 2512 * 2513 * Context: 2514 * Any domain 2515 */ 2516 static nxge_status_t 2517 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2518 p_nxge_dma_common_t *dma_buf_p, 2519 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2520 { 2521 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2522 p_nxge_dma_common_t dmap; 2523 nxge_os_dma_handle_t tx_buf_dma_handle; 2524 p_tx_ring_t tx_ring_p; 2525 p_tx_msg_t tx_msg_ring; 2526 nxge_status_t status = NXGE_OK; 2527 int ddi_status = DDI_SUCCESS; 2528 int i, j, index; 2529 uint32_t size, bsize; 2530 uint32_t nblocks, nmsgs; 2531 char qname[TASKQ_NAMELEN]; 2532 2533 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2534 "==> nxge_map_txdma_channel_buf_ring")); 2535 2536 dma_bufp = tmp_bufp = *dma_buf_p; 2537 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2538 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2539 "chunks bufp $%p", 2540 channel, num_chunks, dma_bufp)); 2541 2542 nmsgs = 0; 2543 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2544 nmsgs += tmp_bufp->nblocks; 2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2546 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2547 "bufp $%p nblocks %d nmsgs %d", 2548 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2549 } 2550 if (!nmsgs) { 2551 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2552 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2553 "no msg blocks", 2554 channel)); 2555 status = NXGE_ERROR; 2556 goto nxge_map_txdma_channel_buf_ring_exit; 2557 } 2558 2559 tx_ring_p = (p_tx_ring_t) 2560 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2561 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2562 (void *)nxgep->interrupt_cookie); 2563 2564 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2565 tx_ring_p->tx_ring_busy = B_FALSE; 2566 tx_ring_p->nxgep = nxgep; 2567 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL; 2568 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d", 2569 nxgep->instance, channel); 2570 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1, 2571 TASKQ_DEFAULTPRI, 0); 2572 if (tx_ring_p->taskq == NULL) { 2573 goto nxge_map_txdma_channel_buf_ring_fail1; 2574 } 2575 2576 /* 2577 * Allocate transmit message rings and handles for packets 2578 * not to be copied to premapped buffers. 
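 * Each tx_msg_t gets its own DDI DMA handle so that a large packet
 * can be bound directly for transmission, while small packets are
 * bcopy'd into the premapped buffer blocks instead (see the
 * nxge_tx_use_bcopy and nxge_bcopy_thresh tunables).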
2579 */ 2580 size = nmsgs * sizeof (tx_msg_t); 2581 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2582 for (i = 0; i < nmsgs; i++) { 2583 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2584 DDI_DMA_DONTWAIT, 0, 2585 &tx_msg_ring[i].dma_handle); 2586 if (ddi_status != DDI_SUCCESS) { 2587 status |= NXGE_DDI_FAILED; 2588 break; 2589 } 2590 } 2591 if (i < nmsgs) { 2592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2593 "Allocate handles failed.")); 2594 goto nxge_map_txdma_channel_buf_ring_fail1; 2595 } 2596 2597 tx_ring_p->tdc = channel; 2598 tx_ring_p->tx_msg_ring = tx_msg_ring; 2599 tx_ring_p->tx_ring_size = nmsgs; 2600 tx_ring_p->num_chunks = num_chunks; 2601 if (!nxge_tx_intr_thres) { 2602 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2603 } 2604 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2605 tx_ring_p->rd_index = 0; 2606 tx_ring_p->wr_index = 0; 2607 tx_ring_p->ring_head.value = 0; 2608 tx_ring_p->ring_kick_tail.value = 0; 2609 tx_ring_p->descs_pending = 0; 2610 2611 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2612 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2613 "actual tx desc max %d nmsgs %d " 2614 "(config nxge_tx_ring_size %d)", 2615 channel, tx_ring_p->tx_ring_size, nmsgs, 2616 nxge_tx_ring_size)); 2617 2618 /* 2619 * Map in buffers from the buffer pool. 2620 */ 2621 index = 0; 2622 bsize = dma_bufp->block_size; 2623 2624 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2625 "dma_bufp $%p tx_rng_p $%p " 2626 "tx_msg_rng_p $%p bsize %d", 2627 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2628 2629 tx_buf_dma_handle = dma_bufp->dma_handle; 2630 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2631 bsize = dma_bufp->block_size; 2632 nblocks = dma_bufp->nblocks; 2633 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2634 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2635 "size %d dma_bufp $%p", 2636 i, sizeof (nxge_dma_common_t), dma_bufp)); 2637 2638 for (j = 0; j < nblocks; j++) { 2639 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2640 dmap = &tx_msg_ring[index++].buf_dma; 2641 #ifdef TX_MEM_DEBUG 2642 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2643 "==> nxge_map_txdma_channel_buf_ring: j %d" 2644 "dmap $%p", i, dmap)); 2645 #endif 2646 nxge_setup_dma_common(dmap, dma_bufp, 1, 2647 bsize); 2648 } 2649 } 2650 2651 if (i < num_chunks) { 2652 status = NXGE_ERROR; 2653 goto nxge_map_txdma_channel_buf_ring_fail1; 2654 } 2655 2656 *tx_desc_p = tx_ring_p; 2657 2658 goto nxge_map_txdma_channel_buf_ring_exit; 2659 2660 nxge_map_txdma_channel_buf_ring_fail1: 2661 if (tx_ring_p->taskq) { 2662 ddi_taskq_destroy(tx_ring_p->taskq); 2663 tx_ring_p->taskq = NULL; 2664 } 2665 2666 index--; 2667 for (; index >= 0; index--) { 2668 if (tx_msg_ring[index].dma_handle != NULL) { 2669 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2670 } 2671 } 2672 MUTEX_DESTROY(&tx_ring_p->lock); 2673 KMEM_FREE(tx_msg_ring, size); 2674 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2675 2676 status = NXGE_ERROR; 2677 2678 nxge_map_txdma_channel_buf_ring_exit: 2679 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2680 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2681 2682 return (status); 2683 } 2684 2685 /*ARGSUSED*/ 2686 static void 2687 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2688 { 2689 p_tx_msg_t tx_msg_ring; 2690 p_tx_msg_t tx_msg_p; 2691 int i; 2692 2693 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2694 "==> nxge_unmap_txdma_channel_buf_ring")); 2695 if (tx_ring_p == NULL) { 2696 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2697 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 
2698 return; 2699 } 2700 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2701 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2702 tx_ring_p->tdc)); 2703 2704 tx_msg_ring = tx_ring_p->tx_msg_ring; 2705 2706 /* 2707 * Since the serialization thread, timer thread and 2708 * interrupt thread can all call the transmit reclaim, 2709 * the unmapping function needs to acquire the lock 2710 * to free those buffers which were transmitted 2711 * by the hardware already. 2712 */ 2713 MUTEX_ENTER(&tx_ring_p->lock); 2714 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2715 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2716 "channel %d", 2717 tx_ring_p->tdc)); 2718 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2719 2720 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2721 tx_msg_p = &tx_msg_ring[i]; 2722 if (tx_msg_p->tx_message != NULL) { 2723 freemsg(tx_msg_p->tx_message); 2724 tx_msg_p->tx_message = NULL; 2725 } 2726 } 2727 2728 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2729 if (tx_msg_ring[i].dma_handle != NULL) { 2730 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2731 } 2732 tx_msg_ring[i].dma_handle = NULL; 2733 } 2734 2735 MUTEX_EXIT(&tx_ring_p->lock); 2736 2737 if (tx_ring_p->taskq) { 2738 ddi_taskq_destroy(tx_ring_p->taskq); 2739 tx_ring_p->taskq = NULL; 2740 } 2741 2742 MUTEX_DESTROY(&tx_ring_p->lock); 2743 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2744 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2745 2746 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2747 "<== nxge_unmap_txdma_channel_buf_ring")); 2748 } 2749 2750 static nxge_status_t 2751 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2752 { 2753 p_tx_rings_t tx_rings; 2754 p_tx_ring_t *tx_desc_rings; 2755 p_tx_mbox_areas_t tx_mbox_areas_p; 2756 p_tx_mbox_t *tx_mbox_p; 2757 nxge_status_t status = NXGE_OK; 2758 2759 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2760 2761 tx_rings = nxgep->tx_rings; 2762 if (tx_rings == NULL) { 2763 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2764 "<== nxge_txdma_hw_start: NULL ring pointer")); 2765 return (NXGE_ERROR); 2766 } 2767 tx_desc_rings = tx_rings->rings; 2768 if (tx_desc_rings == NULL) { 2769 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2770 "<== nxge_txdma_hw_start: NULL ring pointers")); 2771 return (NXGE_ERROR); 2772 } 2773 2774 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2775 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2776 2777 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2778 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2779 2780 status = nxge_txdma_start_channel(nxgep, channel, 2781 (p_tx_ring_t)tx_desc_rings[channel], 2782 (p_tx_mbox_t)tx_mbox_p[channel]); 2783 if (status != NXGE_OK) { 2784 goto nxge_txdma_hw_start_fail1; 2785 } 2786 2787 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2788 "tx_rings $%p rings $%p", 2789 nxgep->tx_rings, nxgep->tx_rings->rings)); 2790 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2791 "tx_rings $%p tx_desc_rings $%p", 2792 nxgep->tx_rings, tx_desc_rings)); 2793 2794 goto nxge_txdma_hw_start_exit; 2795 2796 nxge_txdma_hw_start_fail1: 2797 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2798 "==> nxge_txdma_hw_start: disable " 2799 "(status 0x%x channel %d)", status, channel)); 2800 2801 nxge_txdma_hw_start_exit: 2802 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2803 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2804 2805 return (status); 2806 } 2807 2808 /* 2809 * nxge_txdma_start_channel 2810 * 2811 * Start a TDC. 2812 * 2813 * Arguments: 2814 * nxgep 2815 * channel The channel to start. 
2816 * tx_ring_p channel's transmit descriptor ring. 2817 * tx_mbox_p channel' smailbox. 2818 * 2819 * Notes: 2820 * 2821 * NPI/NXGE function calls: 2822 * nxge_reset_txdma_channel() 2823 * nxge_init_txdma_channel_event_mask() 2824 * nxge_enable_txdma_channel() 2825 * 2826 * Registers accessed: 2827 * none directly (see functions above). 2828 * 2829 * Context: 2830 * Any domain 2831 */ 2832 static nxge_status_t 2833 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2834 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2835 2836 { 2837 nxge_status_t status = NXGE_OK; 2838 2839 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2840 "==> nxge_txdma_start_channel (channel %d)", channel)); 2841 /* 2842 * TXDMA/TXC must be in stopped state. 2843 */ 2844 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2845 2846 /* 2847 * Reset TXDMA channel 2848 */ 2849 tx_ring_p->tx_cs.value = 0; 2850 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2851 status = nxge_reset_txdma_channel(nxgep, channel, 2852 tx_ring_p->tx_cs.value); 2853 if (status != NXGE_OK) { 2854 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2855 "==> nxge_txdma_start_channel (channel %d)" 2856 " reset channel failed 0x%x", channel, status)); 2857 goto nxge_txdma_start_channel_exit; 2858 } 2859 2860 /* 2861 * Initialize the TXDMA channel specific FZC control 2862 * configurations. These FZC registers are pertaining 2863 * to each TX channel (i.e. logical pages). 2864 */ 2865 if (!isLDOMguest(nxgep)) { 2866 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2867 tx_ring_p, tx_mbox_p); 2868 if (status != NXGE_OK) { 2869 goto nxge_txdma_start_channel_exit; 2870 } 2871 } 2872 2873 /* 2874 * Initialize the event masks. 2875 */ 2876 tx_ring_p->tx_evmask.value = 0; 2877 status = nxge_init_txdma_channel_event_mask(nxgep, 2878 channel, &tx_ring_p->tx_evmask); 2879 if (status != NXGE_OK) { 2880 goto nxge_txdma_start_channel_exit; 2881 } 2882 2883 /* 2884 * Load TXDMA descriptors, buffers, mailbox, 2885 * initialise the DMA channels and 2886 * enable each DMA channel. 2887 */ 2888 status = nxge_enable_txdma_channel(nxgep, channel, 2889 tx_ring_p, tx_mbox_p); 2890 if (status != NXGE_OK) { 2891 goto nxge_txdma_start_channel_exit; 2892 } 2893 2894 nxge_txdma_start_channel_exit: 2895 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2896 2897 return (status); 2898 } 2899 2900 /* 2901 * nxge_txdma_stop_channel 2902 * 2903 * Stop a TDC. 2904 * 2905 * Arguments: 2906 * nxgep 2907 * channel The channel to stop. 2908 * tx_ring_p channel's transmit descriptor ring. 2909 * tx_mbox_p channel' smailbox. 2910 * 2911 * Notes: 2912 * 2913 * NPI/NXGE function calls: 2914 * nxge_txdma_stop_inj_err() 2915 * nxge_reset_txdma_channel() 2916 * nxge_init_txdma_channel_event_mask() 2917 * nxge_init_txdma_channel_cntl_stat() 2918 * nxge_disable_txdma_channel() 2919 * 2920 * Registers accessed: 2921 * none directly (see functions above). 2922 * 2923 * Context: 2924 * Any domain 2925 */ 2926 /*ARGSUSED*/ 2927 static nxge_status_t 2928 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2929 { 2930 p_tx_ring_t tx_ring_p; 2931 int status = NXGE_OK; 2932 2933 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2934 "==> nxge_txdma_stop_channel: channel %d", channel)); 2935 2936 /* 2937 * Stop (disable) TXDMA and TXC (if stop bit is set 2938 * and STOP_N_GO bit not set, the TXDMA reset state will 2939 * not be set if reset TXDMA. 
2940 */ 2941 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2942 2943 tx_ring_p = nxgep->tx_rings->rings[channel]; 2944 2945 /* 2946 * Reset TXDMA channel 2947 */ 2948 tx_ring_p->tx_cs.value = 0; 2949 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2950 status = nxge_reset_txdma_channel(nxgep, channel, 2951 tx_ring_p->tx_cs.value); 2952 if (status != NXGE_OK) { 2953 goto nxge_txdma_stop_channel_exit; 2954 } 2955 2956 #ifdef HARDWARE_REQUIRED 2957 /* Set up the interrupt event masks. */ 2958 tx_ring_p->tx_evmask.value = 0; 2959 status = nxge_init_txdma_channel_event_mask(nxgep, 2960 channel, &tx_ring_p->tx_evmask); 2961 if (status != NXGE_OK) { 2962 goto nxge_txdma_stop_channel_exit; 2963 } 2964 2965 /* Initialize the DMA control and status register */ 2966 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2967 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2968 tx_ring_p->tx_cs.value); 2969 if (status != NXGE_OK) { 2970 goto nxge_txdma_stop_channel_exit; 2971 } 2972 2973 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2974 2975 /* Disable channel */ 2976 status = nxge_disable_txdma_channel(nxgep, channel, 2977 tx_ring_p, tx_mbox_p); 2978 if (status != NXGE_OK) { 2979 goto nxge_txdma_start_channel_exit; 2980 } 2981 2982 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2983 "==> nxge_txdma_stop_channel: event done")); 2984 2985 #endif 2986 2987 nxge_txdma_stop_channel_exit: 2988 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2989 return (status); 2990 } 2991 2992 /* 2993 * nxge_txdma_get_ring 2994 * 2995 * Get the ring for a TDC. 2996 * 2997 * Arguments: 2998 * nxgep 2999 * channel 3000 * 3001 * Notes: 3002 * 3003 * NPI/NXGE function calls: 3004 * 3005 * Registers accessed: 3006 * 3007 * Context: 3008 * Any domain 3009 */ 3010 static p_tx_ring_t 3011 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 3012 { 3013 nxge_grp_set_t *set = &nxgep->tx_set; 3014 int tdc; 3015 3016 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 3017 3018 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3019 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3020 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 3021 goto return_null; 3022 } 3023 3024 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3025 if ((1 << tdc) & set->owned.map) { 3026 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3027 if (ring) { 3028 if (channel == ring->tdc) { 3029 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3030 "<== nxge_txdma_get_ring: " 3031 "tdc %d ring $%p", tdc, ring)); 3032 return (ring); 3033 } 3034 } 3035 } 3036 } 3037 3038 return_null: 3039 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3040 "ring not found")); 3041 3042 return (NULL); 3043 } 3044 3045 /* 3046 * nxge_txdma_get_mbox 3047 * 3048 * Get the mailbox for a TDC. 
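 *
 * The lookup mirrors nxge_txdma_get_ring(): scan the owned TDC map
 * and return the mailbox whose ring matches the requested channel,
 * or NULL if this instance does not own the channel.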
3049 * 3050 * Arguments: 3051 * nxgep 3052 * channel 3053 * 3054 * Notes: 3055 * 3056 * NPI/NXGE function calls: 3057 * 3058 * Registers accessed: 3059 * 3060 * Context: 3061 * Any domain 3062 */ 3063 static p_tx_mbox_t 3064 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3065 { 3066 nxge_grp_set_t *set = &nxgep->tx_set; 3067 int tdc; 3068 3069 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3070 3071 if (nxgep->tx_mbox_areas_p == 0 || 3072 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3073 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3074 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3075 goto return_null; 3076 } 3077 3078 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3079 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3080 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3081 goto return_null; 3082 } 3083 3084 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3085 if ((1 << tdc) & set->owned.map) { 3086 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3087 if (ring) { 3088 if (channel == ring->tdc) { 3089 tx_mbox_t *mailbox = nxgep-> 3090 tx_mbox_areas_p-> 3091 txmbox_areas_p[tdc]; 3092 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3093 "<== nxge_txdma_get_mbox: tdc %d " 3094 "ring $%p", tdc, mailbox)); 3095 return (mailbox); 3096 } 3097 } 3098 } 3099 } 3100 3101 return_null: 3102 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3103 "mailbox not found")); 3104 3105 return (NULL); 3106 } 3107 3108 /* 3109 * nxge_tx_err_evnts 3110 * 3111 * Recover a TDC. 3112 * 3113 * Arguments: 3114 * nxgep 3115 * index The index to the TDC ring. 3116 * ldvp Used to get the channel number ONLY. 3117 * cs A copy of the bits from TX_CS. 3118 * 3119 * Notes: 3120 * Calling tree: 3121 * nxge_tx_intr() 3122 * 3123 * NPI/NXGE function calls: 3124 * npi_txdma_ring_error_get() 3125 * npi_txdma_inj_par_error_get() 3126 * nxge_txdma_fatal_err_recover() 3127 * 3128 * Registers accessed: 3129 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3130 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3131 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3132 * 3133 * Context: 3134 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
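 *
 * Handling is uniform for each error bit in 'cs': bump the per-channel
 * kstat, post an FMA ereport, and mark the channel fatal; if any fatal
 * bit was seen, nxge_txdma_fatal_err_recover() is invoked and, on
 * success, FM_SERVICE_RESTORED() is reported.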
3135 */ 3136 /*ARGSUSED*/ 3137 static nxge_status_t 3138 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3139 { 3140 npi_handle_t handle; 3141 npi_status_t rs; 3142 uint8_t channel; 3143 p_tx_ring_t *tx_rings; 3144 p_tx_ring_t tx_ring_p; 3145 p_nxge_tx_ring_stats_t tdc_stats; 3146 boolean_t txchan_fatal = B_FALSE; 3147 nxge_status_t status = NXGE_OK; 3148 tdmc_inj_par_err_t par_err; 3149 uint32_t value; 3150 3151 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3152 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3153 channel = ldvp->channel; 3154 3155 tx_rings = nxgep->tx_rings->rings; 3156 tx_ring_p = tx_rings[index]; 3157 tdc_stats = tx_ring_p->tdc_stats; 3158 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3159 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3160 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3161 if ((rs = npi_txdma_ring_error_get(handle, channel, 3162 &tdc_stats->errlog)) != NPI_SUCCESS) 3163 return (NXGE_ERROR | rs); 3164 } 3165 3166 if (cs.bits.ldw.mbox_err) { 3167 tdc_stats->mbox_err++; 3168 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3169 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3171 "==> nxge_tx_err_evnts(channel %d): " 3172 "fatal error: mailbox", channel)); 3173 txchan_fatal = B_TRUE; 3174 } 3175 if (cs.bits.ldw.pkt_size_err) { 3176 tdc_stats->pkt_size_err++; 3177 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3178 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3180 "==> nxge_tx_err_evnts(channel %d): " 3181 "fatal error: pkt_size_err", channel)); 3182 txchan_fatal = B_TRUE; 3183 } 3184 if (cs.bits.ldw.tx_ring_oflow) { 3185 tdc_stats->tx_ring_oflow++; 3186 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3187 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3188 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3189 "==> nxge_tx_err_evnts(channel %d): " 3190 "fatal error: tx_ring_oflow", channel)); 3191 txchan_fatal = B_TRUE; 3192 } 3193 if (cs.bits.ldw.pref_buf_par_err) { 3194 tdc_stats->pre_buf_par_err++; 3195 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3196 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3197 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3198 "==> nxge_tx_err_evnts(channel %d): " 3199 "fatal error: pre_buf_par_err", channel)); 3200 /* Clear error injection source for parity error */ 3201 (void) npi_txdma_inj_par_error_get(handle, &value); 3202 par_err.value = value; 3203 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3204 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3205 txchan_fatal = B_TRUE; 3206 } 3207 if (cs.bits.ldw.nack_pref) { 3208 tdc_stats->nack_pref++; 3209 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3210 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3211 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3212 "==> nxge_tx_err_evnts(channel %d): " 3213 "fatal error: nack_pref", channel)); 3214 txchan_fatal = B_TRUE; 3215 } 3216 if (cs.bits.ldw.nack_pkt_rd) { 3217 tdc_stats->nack_pkt_rd++; 3218 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3219 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3221 "==> nxge_tx_err_evnts(channel %d): " 3222 "fatal error: nack_pkt_rd", channel)); 3223 txchan_fatal = B_TRUE; 3224 } 3225 if (cs.bits.ldw.conf_part_err) { 3226 tdc_stats->conf_part_err++; 3227 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3228 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3230 "==> 
nxge_tx_err_evnts(channel %d): " 3231 "fatal error: config_partition_err", channel)); 3232 txchan_fatal = B_TRUE; 3233 } 3234 if (cs.bits.ldw.pkt_prt_err) { 3235 tdc_stats->pkt_part_err++; 3236 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3237 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3239 "==> nxge_tx_err_evnts(channel %d): " 3240 "fatal error: pkt_prt_err", channel)); 3241 txchan_fatal = B_TRUE; 3242 } 3243 3244 /* Clear error injection source in case this is an injected error */ 3245 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3246 3247 if (txchan_fatal) { 3248 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3249 " nxge_tx_err_evnts: " 3250 " fatal error on channel %d cs 0x%llx\n", 3251 channel, cs.value)); 3252 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3253 tx_ring_p); 3254 if (status == NXGE_OK) { 3255 FM_SERVICE_RESTORED(nxgep); 3256 } 3257 } 3258 3259 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3260 3261 return (status); 3262 } 3263 3264 static nxge_status_t 3265 nxge_txdma_fatal_err_recover( 3266 p_nxge_t nxgep, 3267 uint16_t channel, 3268 p_tx_ring_t tx_ring_p) 3269 { 3270 npi_handle_t handle; 3271 npi_status_t rs = NPI_SUCCESS; 3272 p_tx_mbox_t tx_mbox_p; 3273 nxge_status_t status = NXGE_OK; 3274 3275 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3276 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3277 "Recovering from TxDMAChannel#%d error...", channel)); 3278 3279 /* 3280 * Stop the dma channel waits for the stop done. 3281 * If the stop done bit is not set, then create 3282 * an error. 3283 */ 3284 3285 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3286 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3287 MUTEX_ENTER(&tx_ring_p->lock); 3288 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3289 if (rs != NPI_SUCCESS) { 3290 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3291 "==> nxge_txdma_fatal_err_recover (channel %d): " 3292 "stop failed ", channel)); 3293 goto fail; 3294 } 3295 3296 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3297 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3298 3299 /* 3300 * Reset TXDMA channel 3301 */ 3302 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3303 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3304 NPI_SUCCESS) { 3305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3306 "==> nxge_txdma_fatal_err_recover (channel %d)" 3307 " reset channel failed 0x%x", channel, rs)); 3308 goto fail; 3309 } 3310 3311 /* 3312 * Reset the tail (kick) register to 0. 3313 * (Hardware will not reset it. Tx overflow fatal 3314 * error if tail is not set to 0 after reset! 3315 */ 3316 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3317 3318 /* Restart TXDMA channel */ 3319 3320 if (!isLDOMguest(nxgep)) { 3321 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3322 3323 // XXX This is a problem in HIO! 3324 /* 3325 * Initialize the TXDMA channel specific FZC control 3326 * configurations. These FZC registers are pertaining 3327 * to each TX channel (i.e. logical pages). 3328 */ 3329 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3330 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3331 tx_ring_p, tx_mbox_p); 3332 if (status != NXGE_OK) 3333 goto fail; 3334 } 3335 3336 /* 3337 * Initialize the event masks. 
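 * (A zero mask leaves all TDC events unmasked for the recovered
 * channel.)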
3338 */ 3339 tx_ring_p->tx_evmask.value = 0; 3340 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3341 &tx_ring_p->tx_evmask); 3342 if (status != NXGE_OK) 3343 goto fail; 3344 3345 tx_ring_p->wr_index_wrap = B_FALSE; 3346 tx_ring_p->wr_index = 0; 3347 tx_ring_p->rd_index = 0; 3348 3349 /* 3350 * Load TXDMA descriptors, buffers, mailbox, 3351 * initialise the DMA channels and 3352 * enable each DMA channel. 3353 */ 3354 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3355 status = nxge_enable_txdma_channel(nxgep, channel, 3356 tx_ring_p, tx_mbox_p); 3357 MUTEX_EXIT(&tx_ring_p->lock); 3358 if (status != NXGE_OK) 3359 goto fail; 3360 3361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3362 "Recovery Successful, TxDMAChannel#%d Restored", 3363 channel)); 3364 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3365 3366 return (NXGE_OK); 3367 3368 fail: 3369 MUTEX_EXIT(&tx_ring_p->lock); 3370 3371 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3372 "nxge_txdma_fatal_err_recover (channel %d): " 3373 "failed to recover this txdma channel", channel)); 3374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3375 3376 return (status); 3377 } 3378 3379 /* 3380 * nxge_tx_port_fatal_err_recover 3381 * 3382 * Attempt to recover from a fatal port error. 3383 * 3384 * Arguments: 3385 * nxgep 3386 * 3387 * Notes: 3388 * How would a guest do this? 3389 * 3390 * NPI/NXGE function calls: 3391 * 3392 * Registers accessed: 3393 * 3394 * Context: 3395 * Service domain 3396 */ 3397 nxge_status_t 3398 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3399 { 3400 nxge_grp_set_t *set = &nxgep->tx_set; 3401 nxge_channel_t tdc; 3402 3403 tx_ring_t *ring; 3404 tx_mbox_t *mailbox; 3405 3406 npi_handle_t handle; 3407 nxge_status_t status; 3408 npi_status_t rs; 3409 3410 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3411 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3412 "Recovering from TxPort error...")); 3413 3414 if (isLDOMguest(nxgep)) { 3415 return (NXGE_OK); 3416 } 3417 3418 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3419 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3420 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3421 return (NXGE_ERROR); 3422 } 3423 3424 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3425 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3426 "<== nxge_tx_port_fatal_err_recover: " 3427 "NULL ring pointer(s)")); 3428 return (NXGE_ERROR); 3429 } 3430 3431 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3432 if ((1 << tdc) & set->owned.map) { 3433 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3434 if (ring) 3435 MUTEX_ENTER(&ring->lock); 3436 } 3437 } 3438 3439 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3440 3441 /* 3442 * Stop all the TDCs owned by us. 3443 * (The shared TDCs will have been stopped by their owners.) 3444 */ 3445 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3446 if ((1 << tdc) & set->owned.map) { 3447 ring = nxgep->tx_rings->rings[tdc]; 3448 if (ring) { 3449 rs = npi_txdma_channel_control 3450 (handle, TXDMA_STOP, tdc); 3451 if (rs != NPI_SUCCESS) { 3452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3453 "nxge_tx_port_fatal_err_recover " 3454 "(channel %d): stop failed ", tdc)); 3455 goto fail; 3456 } 3457 } 3458 } 3459 } 3460 3461 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3462 3463 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3464 if ((1 << tdc) & set->owned.map) { 3465 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3466 if (ring) { 3467 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3468 } 3469 } 3470 } 3471 3472 /* 3473 * Reset all the TDCs. 
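 * Each reset below is followed by zeroing the ring's kick register:
 * the hardware does not clear it, and a stale tail value would cause
 * a Tx ring overflow fatal error after the restart.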
3474 */ 3475 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3476 3477 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3478 if ((1 << tdc) & set->owned.map) { 3479 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3480 if (ring) { 3481 if ((rs = npi_txdma_channel_control 3482 (handle, TXDMA_RESET, tdc)) 3483 != NPI_SUCCESS) { 3484 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3485 "nxge_tx_port_fatal_err_recover " 3486 "(channel %d) reset channel " 3487 "failed 0x%x", tdc, rs)); 3488 goto fail; 3489 } 3490 } 3491 /* 3492 * Reset the tail (kick) register to 0. 3493 * (Hardware will not reset it. Tx overflow fatal 3494 * error if tail is not set to 0 after reset! 3495 */ 3496 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3497 } 3498 } 3499 3500 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3501 3502 /* Restart all the TDCs */ 3503 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3504 if ((1 << tdc) & set->owned.map) { 3505 ring = nxgep->tx_rings->rings[tdc]; 3506 if (ring) { 3507 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3508 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3509 ring, mailbox); 3510 ring->tx_evmask.value = 0; 3511 /* 3512 * Initialize the event masks. 3513 */ 3514 status = nxge_init_txdma_channel_event_mask 3515 (nxgep, tdc, &ring->tx_evmask); 3516 3517 ring->wr_index_wrap = B_FALSE; 3518 ring->wr_index = 0; 3519 ring->rd_index = 0; 3520 3521 if (status != NXGE_OK) 3522 goto fail; 3523 if (status != NXGE_OK) 3524 goto fail; 3525 } 3526 } 3527 } 3528 3529 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3530 3531 /* Re-enable all the TDCs */ 3532 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3533 if ((1 << tdc) & set->owned.map) { 3534 ring = nxgep->tx_rings->rings[tdc]; 3535 if (ring) { 3536 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3537 status = nxge_enable_txdma_channel(nxgep, tdc, 3538 ring, mailbox); 3539 if (status != NXGE_OK) 3540 goto fail; 3541 } 3542 } 3543 } 3544 3545 /* 3546 * Unlock all the TDCs. 3547 */ 3548 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3549 if ((1 << tdc) & set->owned.map) { 3550 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3551 if (ring) 3552 MUTEX_EXIT(&ring->lock); 3553 } 3554 } 3555 3556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3557 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3558 3559 return (NXGE_OK); 3560 3561 fail: 3562 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3563 if ((1 << tdc) & set->owned.map) { 3564 ring = nxgep->tx_rings->rings[tdc]; 3565 if (ring) 3566 MUTEX_EXIT(&ring->lock); 3567 } 3568 } 3569 3570 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3571 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3572 3573 return (status); 3574 } 3575 3576 /* 3577 * nxge_txdma_inject_err 3578 * 3579 * Inject an error into a TDC. 3580 * 3581 * Arguments: 3582 * nxgep 3583 * err_id The error to inject. 3584 * chan The channel to inject into. 3585 * 3586 * Notes: 3587 * This is called from nxge_main.c:nxge_err_inject() 3588 * Has this ioctl ever been used? 
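 *
 * Two injection mechanisms are used below: a prefetch-buffer parity
 * error is armed per channel through TDMC_INJ_PAR_ERR, while every
 * other TDC error is simulated by setting the corresponding bit in
 * the channel's TDMC_INTR_DBG register.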
3589 * 3590 * NPI/NXGE function calls: 3591 * npi_txdma_inj_par_error_get() 3592 * npi_txdma_inj_par_error_set() 3593 * 3594 * Registers accessed: 3595 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3596 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3597 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3598 * 3599 * Context: 3600 * Service domain 3601 */ 3602 void 3603 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3604 { 3605 tdmc_intr_dbg_t tdi; 3606 tdmc_inj_par_err_t par_err; 3607 uint32_t value; 3608 npi_handle_t handle; 3609 3610 switch (err_id) { 3611 3612 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3613 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3614 /* Clear error injection source for parity error */ 3615 (void) npi_txdma_inj_par_error_get(handle, &value); 3616 par_err.value = value; 3617 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3618 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3619 3620 par_err.bits.ldw.inject_parity_error = (1 << chan); 3621 (void) npi_txdma_inj_par_error_get(handle, &value); 3622 par_err.value = value; 3623 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3624 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3625 (unsigned long long)par_err.value); 3626 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3627 break; 3628 3629 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3630 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3631 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3632 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3633 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3634 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3635 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3636 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3637 chan, &tdi.value); 3638 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3639 tdi.bits.ldw.pref_buf_par_err = 1; 3640 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3641 tdi.bits.ldw.mbox_err = 1; 3642 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3643 tdi.bits.ldw.nack_pref = 1; 3644 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3645 tdi.bits.ldw.nack_pkt_rd = 1; 3646 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3647 tdi.bits.ldw.pkt_size_err = 1; 3648 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3649 tdi.bits.ldw.tx_ring_oflow = 1; 3650 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3651 tdi.bits.ldw.conf_part_err = 1; 3652 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3653 tdi.bits.ldw.pkt_part_err = 1; 3654 #if defined(__i386) 3655 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3656 tdi.value); 3657 #else 3658 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3659 tdi.value); 3660 #endif 3661 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3662 chan, tdi.value); 3663 3664 break; 3665 } 3666 } 3667