/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 64;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA.
 */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern void nxge_tx_ring_task(void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
	p_tx_ring_t ring_p, uint16_t channel);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int i, tdc, count;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_TX, tdc)))
						goto init_txdma_channels_exit;
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
	return (NXGE_OK);

init_txdma_channels_exit:
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_TX, tdc);
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	return (NXGE_ERROR);

}

nxge_status_t
nxge_init_txdma_channel(
	p_nxge_t nxge,
	int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

	status = nxge_map_txdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channel: status 0x%x", status));
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	status = nxge_txdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		(void) nxge_unmap_txdma_channel(nxge, channel);
		(void)
		    npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	if (!nxge->statsp->tdc_ksp[channel])
		nxge_setup_tdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

	return (status);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "nxge_uninit_txdma_channels: no channels"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

	if (nxgep->statsp->tdc_ksp[channel]) {
		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
		nxgep->statsp->tdc_ksp[channel] = 0;
	}

	if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
		goto nxge_uninit_txdma_channel_exit;

	nxge_unmap_txdma_channel(nxgep, channel);

nxge_uninit_txdma_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
}

void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t tsize;
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *	Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it; a Tx overflow fatal
	 * error results if the tail is not set to 0 after reset.)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *	Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 * 	mask_p	The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Write the given value to a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to initialize.
 * 	reg_data	The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *	Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
			return (NXGE_ERROR);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine.
	 */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
    boolean_t l4_cksum, int pkt_len, uint8_t npads,
    p_tx_pkt_hdr_all_t pkthdrp,
    t_uscalar_t start_offset,
    t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t hdrp;
	p_mblk_t nmp;
	uint64_t tmp;
	size_t mblk_len;
	size_t iph_len;
	size_t hdrs_size;
	uint8_t hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t *cursor;
	uint8_t *ip_buf;
	uint16_t eth_type;
	uint8_t ipproto;
	boolean_t is_vlan = B_FALSE;
	size_t eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if
		    (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t *up;
				uint16_t cksum;
				t_uscalar_t stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
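				 *
				 * Descriptive note (inferred from the code
				 * below): the full UDP checksum is computed
				 * in software here and written into the
				 * packet at stuff_offset, and the hardware
				 * UDP checksum bit is cleared so the device
				 * does not touch it again.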
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
					if (nmp)
						mblk_len = MBLKL(nmp);
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
				    "cksum 0x%x "
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP "
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
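			/*
			 * Descriptive note (inferred from the code below):
			 * sub-threshold fragments are expected to be
			 * bcopied into one pre-allocated transmit buffer,
			 * so a run of small mblks consumes a single gather
			 * pointer; a new block is counted only when a
			 * fresh accumulation starts (pkt_len == 0).
			 */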
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t status = B_TRUE;
	p_nxge_dma_common_t tx_desc_dma_p;
	nxge_dma_common_t desc_area;
	p_tx_desc_t tx_desc_ring_vp;
	p_tx_desc_t tx_desc_p;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t r_tx_desc;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint32_t pkt_len;
	uint_t tx_rd_index;
	uint16_t head_index, tail_index;
	uint8_t tdc;
	boolean_t head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

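		/*
		 * Descriptive note: the software tail was sampled above;
		 * the hardware head (TX_RING_HDL) is read below to decide
		 * how many descriptors the DMA engine has completed.
		 */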
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
		if (status) {
			(void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
			    1, 0);
		}
	} else {
		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 *	Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(char *arg1, char *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_tx_intr: interface not started or initialized"));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		nxge_tx_ring_task((void *)tx_ring_p);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

npi_status_t
nxge_txdma_channel_disable(
	nxge_t *nxge,
	int channel)
{
	npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
	npi_status_t rs;
	tdmc_intr_dbg_t intr_dbg;

	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not present, then force
	 * an error so TXC will stop.
	 * All channels bound to this port need to be stopped
	 * and reset after injecting an interrupt error.
	 */
	rs = npi_txdma_channel_disable(handle, channel);
	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
	    "==> nxge_txdma_channel_disable(%d) "
	    "rs 0x%x", channel, rs));
	if (rs != NPI_SUCCESS) {
		/* Inject any error */
		intr_dbg.value = 0;
		intr_dbg.bits.ldw.nack_pref = 1;
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop failed 0x%x) "
		    "(inject err)", channel, rs));
		(void) npi_txdma_inj_int_error_set(
		    handle, channel, &intr_dbg);
		rs = npi_txdma_channel_disable(handle, channel);
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop again 0x%x) "
		    "(after inject err)",
		    channel, rs));
	}

	return (rs);
}

/*
 * nxge_txdma_hw_mode
 *
 *	Toggle all TDCs on (enable) or off (disable).
 *
 * Arguments:
 * 	nxgep
 * 	enable	Enable or disable a TDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_enable(TX_CS)
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	nxge_grp_set_t *set = &nxgep->tx_set;

	npi_handle_t handle;
	nxge_status_t status;
	npi_status_t rs;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_mode: enable mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_mode: not initialized"));
		return (NXGE_ERROR);
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
		return (NXGE_ERROR);
	}

	/* Enable or disable all of the TDCs owned by us. */
	rs = 0;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_mode: channel %d", tdc));
				if (enable) {
					rs = npi_txdma_channel_enable
					    (handle, tdc);
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (enable) rs 0x%x",
					    tdc, rs));
				} else {
					rs = nxge_txdma_channel_disable
					    (nxgep, tdc);
				}
			}
		}
	}

	status = ((rs == NPI_SUCCESS) ?
	    NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* stop the transmit dma channels */
	(void) npi_txdma_channel_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

/*
 * nxge_txdma_stop_inj_err
 *
 *	Stop a TDC. If at first we don't succeed, inject an error.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_disable()
 *	npi_txdma_inj_int_error_set()
 *	#if defined(NXGE_DEBUG)
 *	nxge_txdma_regs_dump_channels(nxgep);
 *	#endif
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (status == NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    "injecting error", channel, rs));
	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ?
	    NXGE_OK : NXGE_ERROR | rs);
	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK ", channel));
		return (status);
	}

#if defined(NXGE_DEBUG)
	nxge_txdma_regs_dump_channels(nxgep);
#endif
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
	return (status);
}

/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_fixup_txdma_rings: channel %d",
				    tdc));
				nxge_txdma_fixup_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring =
			    nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_kick: channel %d", tdc));
				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    " nxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*
 * nxge_check_tx_hang
 *
 *	Check the state of all TDCs belonging to nxgep.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_hw.c:nxge_check_hw_state().
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		goto nxge_check_tx_hang_exit;
	}

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}

nxge_check_tx_hang_exit:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 *	Determine whether any TDC owned by <nxgep> appears to be hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;
	boolean_t shared;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		/*
		 * Grab the shared state of the TDC.
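		 *
		 * Descriptive note: a channel currently loaned to a guest
		 * domain is presumably driven by that guest, so it is
		 * skipped by the hang check below.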
		 */
		if (isLDOMservice(nxgep)) {
			nxge_hio_data_t *nhd =
			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;

			MUTEX_ENTER(&nhd->lock);
			shared = nxgep->tdc_is_shared[tdc];
			MUTEX_EXIT(&nhd->lock);
		} else {
			shared = B_FALSE;
		}

		/*
		 * Only check the channels that we own and that are
		 * not shared.
		 */
		if (((1 << tdc) & set->owned.map) && !shared) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

/*
 * nxge_txdma_channel_hung
 *
 *	Determine whether the given TDC appears to be hung.
 *
 * Arguments:
 * 	nxgep
 * 	ring	<channel>'s ring.
 * 	channel	The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t head_index, tail_index;
	boolean_t head_wrap, tail_wrap;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint_t tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index = tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}

/*
 * nxge_fixup_hung_txdma_rings
 *
 *	Attempt to recover each TDC owned by <nxgep> that appears hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
				    tdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}

/*
 * nxge_txdma_fixup_hung_channel
 *
 *	'Fix' a hung TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to fix.
 *
 * Notes:
 *	Called by nxge_fixup_hung_txdma_rings()
 *
 *	1. Reclaim the TDC.
 *	2. Disable the TDC.
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_reclaim()
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: channel "
		    "not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	/* Reclaim descriptors */
	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	MUTEX_EXIT(&ring_p->lock);

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop done bit is not set, then force
	 * an error.
	 */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped again "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
	    "ring tdc %d passed channel %d",
	    ring_p->tdc, channel));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
}

/*ARGSUSED*/
void
nxge_reclaim_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_reclaim_rings: TDC %d", tdc));
				MUTEX_ENTER(&ring->lock);
				(void) nxge_txdma_reclaim(nxgep, ring, 0);
				MUTEX_EXIT(&ring->lock);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
}

void
nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	npi_handle_t handle;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	if (!isLDOMguest(nxgep)) {
		(void) npi_txdma_dump_fzc_regs(handle);

		/* Dump TXC registers. */
		(void) npi_txc_dump_fzc_regs(handle);
		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: "
		    "NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_regs_dump_channels: "
				    "TDC %d", tdc));
				(void) npi_txdma_dump_tdc_regs(handle, tdc);

				/* Dump TXC registers, if able to.
*/
2076 if (!isLDOMguest(nxgep)) {
2077 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2078 "==> nxge_txdma_regs_dump_channels:"
2079 " FZC TDC %d", tdc));
2080 (void) npi_txc_dump_tdc_fzc_regs
2081 (handle, tdc);
2082 }
2083 nxge_txdma_regs_dump(nxgep, tdc);
2084 }
2085 }
2086 }
2087
2088 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2089 }
2090
2091 void
2092 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2093 {
2094 npi_handle_t handle;
2095 tx_ring_hdl_t hdl;
2096 tx_ring_kick_t kick;
2097 tx_cs_t cs;
2098 txc_control_t control;
2099 uint32_t bitmap = 0;
2100 uint32_t burst = 0;
2101 uint32_t bytes = 0;
2102 dma_log_page_t cfg;
2103
2104 printf("\n\tfunc # %d tdc %d ",
2105 nxgep->function_num, channel);
2106 cfg.page_num = 0;
2107 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2108 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2109 printf("\n\tlog page func %d valid page 0 %d",
2110 cfg.func_num, cfg.valid);
2111 cfg.page_num = 1;
2112 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2113 printf("\n\tlog page func %d valid page 1 %d",
2114 cfg.func_num, cfg.valid);
2115
2116 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2117 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2118 printf("\n\thead value is 0x%0llx",
2119 (long long)hdl.value);
2120 printf("\n\thead index %d", hdl.bits.ldw.head);
2121 printf("\n\tkick value is 0x%0llx",
2122 (long long)kick.value);
2123 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2124
2125 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2126 printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2127 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2128
2129 (void) npi_txc_control(handle, OP_GET, &control);
2130 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2131 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2132 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2133
2134 printf("\n\tTXC port control 0x%0llx",
2135 (long long)control.value);
2136 printf("\n\tTXC port bitmap 0x%x", bitmap);
2137 printf("\n\tTXC max burst %d", burst);
2138 printf("\n\tTXC bytes xmt %d\n", bytes);
2139
2140 {
2141 ipp_status_t status;
2142
2143 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2144 #if defined(__i386)
2145 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2146 #else
2147 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2148 #endif
2149 }
2150 }
2151
2152 /*
2153 * nxge_tdc_hvio_setup
2154 *
2155 * I'm not exactly sure what this code does.
2156 *
2157 * Arguments:
2158 * nxgep
2159 * channel The channel to map.
2160 *
2161 * Notes:
2162 *
2163 * NPI/NXGE function calls:
2164 * na
2165 *
2166 * Context:
2167 * Service domain?
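 *
 * (Best-effort reading, not from the original author: the function
 * records the original contiguous I/O addresses and sizes of the
 * channel's data-buffer and control (descriptor/mailbox) areas in the
 * tx_ring_t, so that the sun4v NIU_LP_WORKAROUND path can later
 * present them to the hypervisor as logical pages; hv_set stays
 * B_FALSE until that has actually been done.)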
2168 */ 2169 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2170 static void 2171 nxge_tdc_hvio_setup( 2172 nxge_t *nxgep, int channel) 2173 { 2174 nxge_dma_common_t *data; 2175 nxge_dma_common_t *control; 2176 tx_ring_t *ring; 2177 2178 ring = nxgep->tx_rings->rings[channel]; 2179 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2180 2181 ring->hv_set = B_FALSE; 2182 2183 ring->hv_tx_buf_base_ioaddr_pp = 2184 (uint64_t)data->orig_ioaddr_pp; 2185 ring->hv_tx_buf_ioaddr_size = 2186 (uint64_t)data->orig_alength; 2187 2188 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2189 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2190 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2191 ring->hv_tx_buf_base_ioaddr_pp, 2192 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2193 data->ioaddr_pp, data->orig_vatopa, 2194 data->orig_alength, data->orig_alength)); 2195 2196 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2197 2198 ring->hv_tx_cntl_base_ioaddr_pp = 2199 (uint64_t)control->orig_ioaddr_pp; 2200 ring->hv_tx_cntl_ioaddr_size = 2201 (uint64_t)control->orig_alength; 2202 2203 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2204 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2205 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2206 ring->hv_tx_cntl_base_ioaddr_pp, 2207 control->orig_ioaddr_pp, control->orig_vatopa, 2208 ring->hv_tx_cntl_ioaddr_size, 2209 control->orig_alength, control->orig_alength)); 2210 } 2211 #endif 2212 2213 static nxge_status_t 2214 nxge_map_txdma(p_nxge_t nxgep, int channel) 2215 { 2216 nxge_dma_common_t **pData; 2217 nxge_dma_common_t **pControl; 2218 tx_ring_t **pRing, *ring; 2219 tx_mbox_t **mailbox; 2220 uint32_t num_chunks; 2221 2222 nxge_status_t status = NXGE_OK; 2223 2224 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2225 2226 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2227 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2228 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2229 "<== nxge_map_txdma: buf not allocated")); 2230 return (NXGE_ERROR); 2231 } 2232 } 2233 2234 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2235 return (NXGE_ERROR); 2236 2237 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2238 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2239 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2240 pRing = &nxgep->tx_rings->rings[channel]; 2241 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2242 2243 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2244 "tx_rings $%p tx_desc_rings $%p", 2245 nxgep->tx_rings, nxgep->tx_rings->rings)); 2246 2247 /* 2248 * Map descriptors from the buffer pools for <channel>. 2249 */ 2250 2251 /* 2252 * Set up and prepare buffer blocks, descriptors 2253 * and mailbox. 
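 * (Both are handled by nxge_map_txdma_channel() below: it first maps
 * the transmit buffer ring (the per-packet tx_msg_t array and its DMA
 * handles) and then the descriptor ring and mailbox.)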
2254 */ 2255 status = nxge_map_txdma_channel(nxgep, channel, 2256 pData, pRing, num_chunks, pControl, mailbox); 2257 if (status != NXGE_OK) { 2258 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2259 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2260 "returned 0x%x", 2261 nxgep, channel, status)); 2262 return (status); 2263 } 2264 2265 ring = *pRing; 2266 2267 ring->index = (uint16_t)channel; 2268 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2269 2270 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2271 if (isLDOMguest(nxgep)) { 2272 (void) nxge_tdc_lp_conf(nxgep, channel); 2273 } else { 2274 nxge_tdc_hvio_setup(nxgep, channel); 2275 } 2276 #endif 2277 2278 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2279 "(status 0x%x channel %d)", status, channel)); 2280 2281 return (status); 2282 } 2283 2284 static nxge_status_t 2285 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2286 p_nxge_dma_common_t *dma_buf_p, 2287 p_tx_ring_t *tx_desc_p, 2288 uint32_t num_chunks, 2289 p_nxge_dma_common_t *dma_cntl_p, 2290 p_tx_mbox_t *tx_mbox_p) 2291 { 2292 int status = NXGE_OK; 2293 2294 /* 2295 * Set up and prepare buffer blocks, descriptors 2296 * and mailbox. 2297 */ 2298 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2299 "==> nxge_map_txdma_channel (channel %d)", channel)); 2300 /* 2301 * Transmit buffer blocks 2302 */ 2303 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2304 dma_buf_p, tx_desc_p, num_chunks); 2305 if (status != NXGE_OK) { 2306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2307 "==> nxge_map_txdma_channel (channel %d): " 2308 "map buffer failed 0x%x", channel, status)); 2309 goto nxge_map_txdma_channel_exit; 2310 } 2311 2312 /* 2313 * Transmit block ring, and mailbox. 2314 */ 2315 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2316 tx_mbox_p); 2317 2318 goto nxge_map_txdma_channel_exit; 2319 2320 nxge_map_txdma_channel_fail1: 2321 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2322 "==> nxge_map_txdma_channel: unmap buf" 2323 "(status 0x%x channel %d)", 2324 status, channel)); 2325 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2326 2327 nxge_map_txdma_channel_exit: 2328 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2329 "<== nxge_map_txdma_channel: " 2330 "(status 0x%x channel %d)", 2331 status, channel)); 2332 2333 return (status); 2334 } 2335 2336 /*ARGSUSED*/ 2337 static void 2338 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2339 { 2340 tx_ring_t *ring; 2341 tx_mbox_t *mailbox; 2342 2343 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2344 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2345 /* 2346 * unmap tx block ring, and mailbox. 2347 */ 2348 ring = nxgep->tx_rings->rings[channel]; 2349 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2350 2351 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2352 2353 /* unmap buffer blocks */ 2354 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2355 2356 nxge_free_txb(nxgep, channel); 2357 2358 /* 2359 * Cleanup the reference to the ring now that it does not exist. 2360 */ 2361 nxgep->tx_rings->rings[channel] = NULL; 2362 2363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2364 } 2365 2366 /* 2367 * nxge_map_txdma_channel_cfg_ring 2368 * 2369 * Map a TDC into our kernel space. 2370 * This function allocates all of the per-channel data structures. 2371 * 2372 * Arguments: 2373 * nxgep 2374 * dma_channel The channel to map. 
2375 * dma_cntl_p 2376 * tx_ring_p dma_channel's transmit ring 2377 * tx_mbox_p dma_channel's mailbox 2378 * 2379 * Notes: 2380 * 2381 * NPI/NXGE function calls: 2382 * nxge_setup_dma_common() 2383 * 2384 * Registers accessed: 2385 * none. 2386 * 2387 * Context: 2388 * Any domain 2389 */ 2390 /*ARGSUSED*/ 2391 static void 2392 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2393 p_nxge_dma_common_t *dma_cntl_p, 2394 p_tx_ring_t tx_ring_p, 2395 p_tx_mbox_t *tx_mbox_p) 2396 { 2397 p_tx_mbox_t mboxp; 2398 p_nxge_dma_common_t cntl_dmap; 2399 p_nxge_dma_common_t dmap; 2400 p_tx_rng_cfig_t tx_ring_cfig_p; 2401 p_tx_ring_kick_t tx_ring_kick_p; 2402 p_tx_cs_t tx_cs_p; 2403 p_tx_dma_ent_msk_t tx_evmask_p; 2404 p_txdma_mbh_t mboxh_p; 2405 p_txdma_mbl_t mboxl_p; 2406 uint64_t tx_desc_len; 2407 2408 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2409 "==> nxge_map_txdma_channel_cfg_ring")); 2410 2411 cntl_dmap = *dma_cntl_p; 2412 2413 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2414 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2415 sizeof (tx_desc_t)); 2416 /* 2417 * Zero out transmit ring descriptors. 2418 */ 2419 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2420 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2421 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2422 tx_cs_p = &(tx_ring_p->tx_cs); 2423 tx_evmask_p = &(tx_ring_p->tx_evmask); 2424 tx_ring_cfig_p->value = 0; 2425 tx_ring_kick_p->value = 0; 2426 tx_cs_p->value = 0; 2427 tx_evmask_p->value = 0; 2428 2429 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2430 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2431 dma_channel, 2432 dmap->dma_cookie.dmac_laddress)); 2433 2434 tx_ring_cfig_p->value = 0; 2435 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2436 tx_ring_cfig_p->value = 2437 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2438 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2439 2440 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2441 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2442 dma_channel, 2443 tx_ring_cfig_p->value)); 2444 2445 tx_cs_p->bits.ldw.rst = 1; 2446 2447 /* Map in mailbox */ 2448 mboxp = (p_tx_mbox_t) 2449 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2450 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2451 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2452 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2453 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2454 mboxh_p->value = mboxl_p->value = 0; 2455 2456 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2457 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2458 dmap->dma_cookie.dmac_laddress)); 2459 2460 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2461 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2462 2463 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2464 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2465 2466 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2467 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2468 dmap->dma_cookie.dmac_laddress)); 2469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2470 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2471 "mbox $%p", 2472 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2473 tx_ring_p->page_valid.value = 0; 2474 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2475 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2476 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2477 tx_ring_p->page_hdl.value = 0; 2478 2479 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2480 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2481 2482 
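/*
 * Default logical-page state: both pages are marked valid and the
 * mask/value/relocation registers are left at zero here. The real
 * per-page values are presumably programmed later, e.g. via
 * nxge_init_fzc_txdma_channel() in the service domain or by the
 * hypervisor on sun4v.
 */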
tx_ring_p->max_burst.value = 0; 2483 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2484 2485 *tx_mbox_p = mboxp; 2486 2487 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2488 "<== nxge_map_txdma_channel_cfg_ring")); 2489 } 2490 2491 /*ARGSUSED*/ 2492 static void 2493 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2494 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2495 { 2496 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2497 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2498 tx_ring_p->tdc)); 2499 2500 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2501 2502 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2503 "<== nxge_unmap_txdma_channel_cfg_ring")); 2504 } 2505 2506 /* 2507 * nxge_map_txdma_channel_buf_ring 2508 * 2509 * 2510 * Arguments: 2511 * nxgep 2512 * channel The channel to map. 2513 * dma_buf_p 2514 * tx_desc_p channel's descriptor ring 2515 * num_chunks 2516 * 2517 * Notes: 2518 * 2519 * NPI/NXGE function calls: 2520 * nxge_setup_dma_common() 2521 * 2522 * Registers accessed: 2523 * none. 2524 * 2525 * Context: 2526 * Any domain 2527 */ 2528 static nxge_status_t 2529 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2530 p_nxge_dma_common_t *dma_buf_p, 2531 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2532 { 2533 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2534 p_nxge_dma_common_t dmap; 2535 nxge_os_dma_handle_t tx_buf_dma_handle; 2536 p_tx_ring_t tx_ring_p; 2537 p_tx_msg_t tx_msg_ring = NULL; 2538 nxge_status_t status = NXGE_OK; 2539 int ddi_status = DDI_SUCCESS; 2540 int i, j, index = 0; 2541 uint32_t size = 0, bsize; 2542 uint32_t nblocks, nmsgs; 2543 char qname[TASKQ_NAMELEN]; 2544 2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2546 "==> nxge_map_txdma_channel_buf_ring")); 2547 2548 dma_bufp = tmp_bufp = *dma_buf_p; 2549 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2550 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2551 "chunks bufp $%p", 2552 channel, num_chunks, dma_bufp)); 2553 2554 nmsgs = 0; 2555 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2556 nmsgs += tmp_bufp->nblocks; 2557 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2558 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2559 "bufp $%p nblocks %d nmsgs %d", 2560 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2561 } 2562 if (!nmsgs) { 2563 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2564 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2565 "no msg blocks", 2566 channel)); 2567 status = NXGE_ERROR; 2568 goto nxge_map_txdma_channel_buf_ring_exit; 2569 } 2570 2571 tx_ring_p = (p_tx_ring_t) 2572 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2573 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2574 (void *)nxgep->interrupt_cookie); 2575 2576 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2577 tx_ring_p->tx_ring_busy = B_FALSE; 2578 tx_ring_p->nxgep = nxgep; 2579 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL; 2580 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d", 2581 nxgep->instance, channel); 2582 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1, 2583 TASKQ_DEFAULTPRI, 0); 2584 if (tx_ring_p->taskq == NULL) { 2585 goto nxge_map_txdma_channel_buf_ring_fail1; 2586 } 2587 2588 /* 2589 * Allocate transmit message rings and handles for packets 2590 * not to be copied to premapped buffers. 
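 * (One DDI DMA handle is pre-allocated per descriptor entry so that a
 * large packet can be DMA-bound directly from its mblk instead of
 * being copied into the premapped buffer blocks.)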
2591 */ 2592 size = nmsgs * sizeof (tx_msg_t); 2593 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2594 for (i = 0; i < nmsgs; i++) { 2595 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2596 DDI_DMA_DONTWAIT, 0, 2597 &tx_msg_ring[i].dma_handle); 2598 if (ddi_status != DDI_SUCCESS) { 2599 status |= NXGE_DDI_FAILED; 2600 break; 2601 } 2602 } 2603 if (i < nmsgs) { 2604 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2605 "Allocate handles failed.")); 2606 goto nxge_map_txdma_channel_buf_ring_fail1; 2607 } 2608 2609 tx_ring_p->tdc = channel; 2610 tx_ring_p->tx_msg_ring = tx_msg_ring; 2611 tx_ring_p->tx_ring_size = nmsgs; 2612 tx_ring_p->num_chunks = num_chunks; 2613 if (!nxge_tx_intr_thres) { 2614 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2615 } 2616 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2617 tx_ring_p->rd_index = 0; 2618 tx_ring_p->wr_index = 0; 2619 tx_ring_p->ring_head.value = 0; 2620 tx_ring_p->ring_kick_tail.value = 0; 2621 tx_ring_p->descs_pending = 0; 2622 2623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2624 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2625 "actual tx desc max %d nmsgs %d " 2626 "(config nxge_tx_ring_size %d)", 2627 channel, tx_ring_p->tx_ring_size, nmsgs, 2628 nxge_tx_ring_size)); 2629 2630 /* 2631 * Map in buffers from the buffer pool. 2632 */ 2633 index = 0; 2634 bsize = dma_bufp->block_size; 2635 2636 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2637 "dma_bufp $%p tx_rng_p $%p " 2638 "tx_msg_rng_p $%p bsize %d", 2639 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2640 2641 tx_buf_dma_handle = dma_bufp->dma_handle; 2642 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2643 bsize = dma_bufp->block_size; 2644 nblocks = dma_bufp->nblocks; 2645 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2646 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2647 "size %d dma_bufp $%p", 2648 i, sizeof (nxge_dma_common_t), dma_bufp)); 2649 2650 for (j = 0; j < nblocks; j++) { 2651 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2652 dmap = &tx_msg_ring[index++].buf_dma; 2653 #ifdef TX_MEM_DEBUG 2654 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2655 "==> nxge_map_txdma_channel_buf_ring: j %d" 2656 "dmap $%p", i, dmap)); 2657 #endif 2658 nxge_setup_dma_common(dmap, dma_bufp, 1, 2659 bsize); 2660 } 2661 } 2662 2663 if (i < num_chunks) { 2664 status = NXGE_ERROR; 2665 goto nxge_map_txdma_channel_buf_ring_fail1; 2666 } 2667 2668 *tx_desc_p = tx_ring_p; 2669 2670 goto nxge_map_txdma_channel_buf_ring_exit; 2671 2672 nxge_map_txdma_channel_buf_ring_fail1: 2673 if (tx_ring_p->taskq) { 2674 ddi_taskq_destroy(tx_ring_p->taskq); 2675 tx_ring_p->taskq = NULL; 2676 } 2677 2678 index--; 2679 for (; index >= 0; index--) { 2680 if (tx_msg_ring[index].dma_handle != NULL) { 2681 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2682 } 2683 } 2684 MUTEX_DESTROY(&tx_ring_p->lock); 2685 KMEM_FREE(tx_msg_ring, size); 2686 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2687 2688 status = NXGE_ERROR; 2689 2690 nxge_map_txdma_channel_buf_ring_exit: 2691 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2692 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2693 2694 return (status); 2695 } 2696 2697 /*ARGSUSED*/ 2698 static void 2699 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2700 { 2701 p_tx_msg_t tx_msg_ring; 2702 p_tx_msg_t tx_msg_p; 2703 int i; 2704 2705 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2706 "==> nxge_unmap_txdma_channel_buf_ring")); 2707 if (tx_ring_p == NULL) { 2708 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2709 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 
2710 return; 2711 } 2712 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2713 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2714 tx_ring_p->tdc)); 2715 2716 tx_msg_ring = tx_ring_p->tx_msg_ring; 2717 2718 /* 2719 * Since the serialization thread, timer thread and 2720 * interrupt thread can all call the transmit reclaim, 2721 * the unmapping function needs to acquire the lock 2722 * to free those buffers which were transmitted 2723 * by the hardware already. 2724 */ 2725 MUTEX_ENTER(&tx_ring_p->lock); 2726 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2727 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2728 "channel %d", 2729 tx_ring_p->tdc)); 2730 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2731 2732 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2733 tx_msg_p = &tx_msg_ring[i]; 2734 if (tx_msg_p->tx_message != NULL) { 2735 freemsg(tx_msg_p->tx_message); 2736 tx_msg_p->tx_message = NULL; 2737 } 2738 } 2739 2740 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2741 if (tx_msg_ring[i].dma_handle != NULL) { 2742 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2743 } 2744 tx_msg_ring[i].dma_handle = NULL; 2745 } 2746 2747 MUTEX_EXIT(&tx_ring_p->lock); 2748 2749 if (tx_ring_p->taskq) { 2750 ddi_taskq_destroy(tx_ring_p->taskq); 2751 tx_ring_p->taskq = NULL; 2752 } 2753 2754 MUTEX_DESTROY(&tx_ring_p->lock); 2755 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2756 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2757 2758 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2759 "<== nxge_unmap_txdma_channel_buf_ring")); 2760 } 2761 2762 static nxge_status_t 2763 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2764 { 2765 p_tx_rings_t tx_rings; 2766 p_tx_ring_t *tx_desc_rings; 2767 p_tx_mbox_areas_t tx_mbox_areas_p; 2768 p_tx_mbox_t *tx_mbox_p; 2769 nxge_status_t status = NXGE_OK; 2770 2771 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2772 2773 tx_rings = nxgep->tx_rings; 2774 if (tx_rings == NULL) { 2775 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2776 "<== nxge_txdma_hw_start: NULL ring pointer")); 2777 return (NXGE_ERROR); 2778 } 2779 tx_desc_rings = tx_rings->rings; 2780 if (tx_desc_rings == NULL) { 2781 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2782 "<== nxge_txdma_hw_start: NULL ring pointers")); 2783 return (NXGE_ERROR); 2784 } 2785 2786 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2787 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2788 2789 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2790 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2791 2792 status = nxge_txdma_start_channel(nxgep, channel, 2793 (p_tx_ring_t)tx_desc_rings[channel], 2794 (p_tx_mbox_t)tx_mbox_p[channel]); 2795 if (status != NXGE_OK) { 2796 goto nxge_txdma_hw_start_fail1; 2797 } 2798 2799 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2800 "tx_rings $%p rings $%p", 2801 nxgep->tx_rings, nxgep->tx_rings->rings)); 2802 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2803 "tx_rings $%p tx_desc_rings $%p", 2804 nxgep->tx_rings, tx_desc_rings)); 2805 2806 goto nxge_txdma_hw_start_exit; 2807 2808 nxge_txdma_hw_start_fail1: 2809 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2810 "==> nxge_txdma_hw_start: disable " 2811 "(status 0x%x channel %d)", status, channel)); 2812 2813 nxge_txdma_hw_start_exit: 2814 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2815 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2816 2817 return (status); 2818 } 2819 2820 /* 2821 * nxge_txdma_start_channel 2822 * 2823 * Start a TDC. 2824 * 2825 * Arguments: 2826 * nxgep 2827 * channel The channel to start. 
2828 * tx_ring_p channel's transmit descriptor ring.
2829 * tx_mbox_p channel's mailbox.
2830 *
2831 * Notes:
2832 *
2833 * NPI/NXGE function calls:
2834 * nxge_reset_txdma_channel()
2835 * nxge_init_txdma_channel_event_mask()
2836 * nxge_enable_txdma_channel()
2837 *
2838 * Registers accessed:
2839 * none directly (see functions above).
2840 *
2841 * Context:
2842 * Any domain
2843 */
2844 static nxge_status_t
2845 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2846 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2847
2848 {
2849 nxge_status_t status = NXGE_OK;
2850
2851 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2852 "==> nxge_txdma_start_channel (channel %d)", channel));
2853 /*
2854 * TXDMA/TXC must be in stopped state.
2855 */
2856 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2857
2858 /*
2859 * Reset TXDMA channel
2860 */
2861 tx_ring_p->tx_cs.value = 0;
2862 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2863 status = nxge_reset_txdma_channel(nxgep, channel,
2864 tx_ring_p->tx_cs.value);
2865 if (status != NXGE_OK) {
2866 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2867 "==> nxge_txdma_start_channel (channel %d)"
2868 " reset channel failed 0x%x", channel, status));
2869 goto nxge_txdma_start_channel_exit;
2870 }
2871
2872 /*
2873 * Initialize the TXDMA channel specific FZC control
2874 * configurations. These FZC registers are pertaining
2875 * to each TX channel (i.e. logical pages).
2876 */
2877 if (!isLDOMguest(nxgep)) {
2878 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2879 tx_ring_p, tx_mbox_p);
2880 if (status != NXGE_OK) {
2881 goto nxge_txdma_start_channel_exit;
2882 }
2883 }
2884
2885 /*
2886 * Initialize the event masks.
2887 */
2888 tx_ring_p->tx_evmask.value = 0;
2889 status = nxge_init_txdma_channel_event_mask(nxgep,
2890 channel, &tx_ring_p->tx_evmask);
2891 if (status != NXGE_OK) {
2892 goto nxge_txdma_start_channel_exit;
2893 }
2894
2895 /*
2896 * Load TXDMA descriptors, buffers, mailbox,
2897 * initialise the DMA channels and
2898 * enable each DMA channel.
2899 */
2900 status = nxge_enable_txdma_channel(nxgep, channel,
2901 tx_ring_p, tx_mbox_p);
2902 if (status != NXGE_OK) {
2903 goto nxge_txdma_start_channel_exit;
2904 }
2905
2906 nxge_txdma_start_channel_exit:
2907 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2908
2909 return (status);
2910 }
2911
2912 /*
2913 * nxge_txdma_stop_channel
2914 *
2915 * Stop a TDC.
2916 *
2917 * Arguments:
2918 * nxgep
2919 * channel The channel to stop.
2920 * tx_ring_p channel's transmit descriptor ring.
2921 * tx_mbox_p channel's mailbox.
2922 *
2923 * Notes:
2924 *
2925 * NPI/NXGE function calls:
2926 * nxge_txdma_stop_inj_err()
2927 * nxge_reset_txdma_channel()
2928 * nxge_init_txdma_channel_event_mask()
2929 * nxge_init_txdma_channel_cntl_stat()
2930 * nxge_disable_txdma_channel()
2931 *
2932 * Registers accessed:
2933 * none directly (see functions above).
2934 *
2935 * Context:
2936 * Any domain
2937 */
2938 /*ARGSUSED*/
2939 static nxge_status_t
2940 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2941 {
2942 p_tx_ring_t tx_ring_p;
2943 int status = NXGE_OK;
2944
2945 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2946 "==> nxge_txdma_stop_channel: channel %d", channel));
2947
2948 /*
2949 * Stop (disable) TXDMA and TXC. (If the stop bit is set
2950 * and the STOP_N_GO bit is not set, the TXDMA reset state
2951 * will not be set when the TXDMA channel is reset.)
2952 */ 2953 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2954 2955 if (nxgep->tx_rings == NULL) { 2956 status = NXGE_ERROR; 2957 goto nxge_txdma_stop_channel_exit; 2958 } 2959 2960 tx_ring_p = nxgep->tx_rings->rings[channel]; 2961 if (tx_ring_p == NULL) { 2962 status = NXGE_ERROR; 2963 goto nxge_txdma_stop_channel_exit; 2964 } 2965 2966 /* 2967 * Reset TXDMA channel 2968 */ 2969 tx_ring_p->tx_cs.value = 0; 2970 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2971 status = nxge_reset_txdma_channel(nxgep, channel, 2972 tx_ring_p->tx_cs.value); 2973 if (status != NXGE_OK) { 2974 goto nxge_txdma_stop_channel_exit; 2975 } 2976 2977 #ifdef HARDWARE_REQUIRED 2978 /* Set up the interrupt event masks. */ 2979 tx_ring_p->tx_evmask.value = 0; 2980 status = nxge_init_txdma_channel_event_mask(nxgep, 2981 channel, &tx_ring_p->tx_evmask); 2982 if (status != NXGE_OK) { 2983 goto nxge_txdma_stop_channel_exit; 2984 } 2985 2986 /* Initialize the DMA control and status register */ 2987 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2988 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2989 tx_ring_p->tx_cs.value); 2990 if (status != NXGE_OK) { 2991 goto nxge_txdma_stop_channel_exit; 2992 } 2993 2994 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2995 2996 /* Disable channel */ 2997 status = nxge_disable_txdma_channel(nxgep, channel, 2998 tx_ring_p, tx_mbox_p); 2999 if (status != NXGE_OK) { 3000 goto nxge_txdma_start_channel_exit; 3001 } 3002 3003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3004 "==> nxge_txdma_stop_channel: event done")); 3005 3006 #endif 3007 3008 nxge_txdma_stop_channel_exit: 3009 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 3010 return (status); 3011 } 3012 3013 /* 3014 * nxge_txdma_get_ring 3015 * 3016 * Get the ring for a TDC. 3017 * 3018 * Arguments: 3019 * nxgep 3020 * channel 3021 * 3022 * Notes: 3023 * 3024 * NPI/NXGE function calls: 3025 * 3026 * Registers accessed: 3027 * 3028 * Context: 3029 * Any domain 3030 */ 3031 static p_tx_ring_t 3032 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 3033 { 3034 nxge_grp_set_t *set = &nxgep->tx_set; 3035 int tdc; 3036 3037 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 3038 3039 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3040 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3041 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 3042 goto return_null; 3043 } 3044 3045 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3046 if ((1 << tdc) & set->owned.map) { 3047 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3048 if (ring) { 3049 if (channel == ring->tdc) { 3050 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3051 "<== nxge_txdma_get_ring: " 3052 "tdc %d ring $%p", tdc, ring)); 3053 return (ring); 3054 } 3055 } 3056 } 3057 } 3058 3059 return_null: 3060 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3061 "ring not found")); 3062 3063 return (NULL); 3064 } 3065 3066 /* 3067 * nxge_txdma_get_mbox 3068 * 3069 * Get the mailbox for a TDC. 
3070 * 3071 * Arguments: 3072 * nxgep 3073 * channel 3074 * 3075 * Notes: 3076 * 3077 * NPI/NXGE function calls: 3078 * 3079 * Registers accessed: 3080 * 3081 * Context: 3082 * Any domain 3083 */ 3084 static p_tx_mbox_t 3085 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3086 { 3087 nxge_grp_set_t *set = &nxgep->tx_set; 3088 int tdc; 3089 3090 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3091 3092 if (nxgep->tx_mbox_areas_p == 0 || 3093 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3094 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3095 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3096 goto return_null; 3097 } 3098 3099 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3100 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3101 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3102 goto return_null; 3103 } 3104 3105 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3106 if ((1 << tdc) & set->owned.map) { 3107 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3108 if (ring) { 3109 if (channel == ring->tdc) { 3110 tx_mbox_t *mailbox = nxgep-> 3111 tx_mbox_areas_p-> 3112 txmbox_areas_p[tdc]; 3113 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3114 "<== nxge_txdma_get_mbox: tdc %d " 3115 "ring $%p", tdc, mailbox)); 3116 return (mailbox); 3117 } 3118 } 3119 } 3120 } 3121 3122 return_null: 3123 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3124 "mailbox not found")); 3125 3126 return (NULL); 3127 } 3128 3129 /* 3130 * nxge_tx_err_evnts 3131 * 3132 * Recover a TDC. 3133 * 3134 * Arguments: 3135 * nxgep 3136 * index The index to the TDC ring. 3137 * ldvp Used to get the channel number ONLY. 3138 * cs A copy of the bits from TX_CS. 3139 * 3140 * Notes: 3141 * Calling tree: 3142 * nxge_tx_intr() 3143 * 3144 * NPI/NXGE function calls: 3145 * npi_txdma_ring_error_get() 3146 * npi_txdma_inj_par_error_get() 3147 * nxge_txdma_fatal_err_recover() 3148 * 3149 * Registers accessed: 3150 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3151 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3152 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3153 * 3154 * Context: 3155 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
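 * (TDMC_INJ_PAR_ERR is an FZC register, which a guest domain cannot
 * access; that is presumably the reason for the XXX note above.)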
3156 */ 3157 /*ARGSUSED*/ 3158 static nxge_status_t 3159 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3160 { 3161 npi_handle_t handle; 3162 npi_status_t rs; 3163 uint8_t channel; 3164 p_tx_ring_t *tx_rings; 3165 p_tx_ring_t tx_ring_p; 3166 p_nxge_tx_ring_stats_t tdc_stats; 3167 boolean_t txchan_fatal = B_FALSE; 3168 nxge_status_t status = NXGE_OK; 3169 tdmc_inj_par_err_t par_err; 3170 uint32_t value; 3171 3172 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3173 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3174 channel = ldvp->channel; 3175 3176 tx_rings = nxgep->tx_rings->rings; 3177 tx_ring_p = tx_rings[index]; 3178 tdc_stats = tx_ring_p->tdc_stats; 3179 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3180 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3181 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3182 if ((rs = npi_txdma_ring_error_get(handle, channel, 3183 &tdc_stats->errlog)) != NPI_SUCCESS) 3184 return (NXGE_ERROR | rs); 3185 } 3186 3187 if (cs.bits.ldw.mbox_err) { 3188 tdc_stats->mbox_err++; 3189 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3190 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3191 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3192 "==> nxge_tx_err_evnts(channel %d): " 3193 "fatal error: mailbox", channel)); 3194 txchan_fatal = B_TRUE; 3195 } 3196 if (cs.bits.ldw.pkt_size_err) { 3197 tdc_stats->pkt_size_err++; 3198 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3199 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3200 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3201 "==> nxge_tx_err_evnts(channel %d): " 3202 "fatal error: pkt_size_err", channel)); 3203 txchan_fatal = B_TRUE; 3204 } 3205 if (cs.bits.ldw.tx_ring_oflow) { 3206 tdc_stats->tx_ring_oflow++; 3207 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3208 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3209 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3210 "==> nxge_tx_err_evnts(channel %d): " 3211 "fatal error: tx_ring_oflow", channel)); 3212 txchan_fatal = B_TRUE; 3213 } 3214 if (cs.bits.ldw.pref_buf_par_err) { 3215 tdc_stats->pre_buf_par_err++; 3216 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3217 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3218 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3219 "==> nxge_tx_err_evnts(channel %d): " 3220 "fatal error: pre_buf_par_err", channel)); 3221 /* Clear error injection source for parity error */ 3222 (void) npi_txdma_inj_par_error_get(handle, &value); 3223 par_err.value = value; 3224 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3225 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3226 txchan_fatal = B_TRUE; 3227 } 3228 if (cs.bits.ldw.nack_pref) { 3229 tdc_stats->nack_pref++; 3230 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3231 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3233 "==> nxge_tx_err_evnts(channel %d): " 3234 "fatal error: nack_pref", channel)); 3235 txchan_fatal = B_TRUE; 3236 } 3237 if (cs.bits.ldw.nack_pkt_rd) { 3238 tdc_stats->nack_pkt_rd++; 3239 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3240 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3242 "==> nxge_tx_err_evnts(channel %d): " 3243 "fatal error: nack_pkt_rd", channel)); 3244 txchan_fatal = B_TRUE; 3245 } 3246 if (cs.bits.ldw.conf_part_err) { 3247 tdc_stats->conf_part_err++; 3248 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3249 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3250 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3251 "==> 
nxge_tx_err_evnts(channel %d): " 3252 "fatal error: config_partition_err", channel)); 3253 txchan_fatal = B_TRUE; 3254 } 3255 if (cs.bits.ldw.pkt_prt_err) { 3256 tdc_stats->pkt_part_err++; 3257 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3258 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3260 "==> nxge_tx_err_evnts(channel %d): " 3261 "fatal error: pkt_prt_err", channel)); 3262 txchan_fatal = B_TRUE; 3263 } 3264 3265 /* Clear error injection source in case this is an injected error */ 3266 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3267 3268 if (txchan_fatal) { 3269 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3270 " nxge_tx_err_evnts: " 3271 " fatal error on channel %d cs 0x%llx\n", 3272 channel, cs.value)); 3273 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3274 tx_ring_p); 3275 if (status == NXGE_OK) { 3276 FM_SERVICE_RESTORED(nxgep); 3277 } 3278 } 3279 3280 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3281 3282 return (status); 3283 } 3284 3285 static nxge_status_t 3286 nxge_txdma_fatal_err_recover( 3287 p_nxge_t nxgep, 3288 uint16_t channel, 3289 p_tx_ring_t tx_ring_p) 3290 { 3291 npi_handle_t handle; 3292 npi_status_t rs = NPI_SUCCESS; 3293 p_tx_mbox_t tx_mbox_p; 3294 nxge_status_t status = NXGE_OK; 3295 3296 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3298 "Recovering from TxDMAChannel#%d error...", channel)); 3299 3300 /* 3301 * Stop the dma channel waits for the stop done. 3302 * If the stop done bit is not set, then create 3303 * an error. 3304 */ 3305 3306 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3307 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3308 MUTEX_ENTER(&tx_ring_p->lock); 3309 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3310 if (rs != NPI_SUCCESS) { 3311 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3312 "==> nxge_txdma_fatal_err_recover (channel %d): " 3313 "stop failed ", channel)); 3314 goto fail; 3315 } 3316 3317 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3318 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3319 3320 /* 3321 * Reset TXDMA channel 3322 */ 3323 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3324 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3325 NPI_SUCCESS) { 3326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3327 "==> nxge_txdma_fatal_err_recover (channel %d)" 3328 " reset channel failed 0x%x", channel, rs)); 3329 goto fail; 3330 } 3331 3332 /* 3333 * Reset the tail (kick) register to 0. 3334 * (Hardware will not reset it. Tx overflow fatal 3335 * error if tail is not set to 0 after reset! 3336 */ 3337 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3338 3339 /* Restart TXDMA channel */ 3340 3341 tx_mbox_p = NULL; 3342 if (!isLDOMguest(nxgep)) { 3343 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3344 3345 // XXX This is a problem in HIO! 3346 /* 3347 * Initialize the TXDMA channel specific FZC control 3348 * configurations. These FZC registers are pertaining 3349 * to each TX channel (i.e. logical pages). 3350 */ 3351 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3352 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3353 tx_ring_p, tx_mbox_p); 3354 if (status != NXGE_OK) 3355 goto fail; 3356 } 3357 3358 /* 3359 * Initialize the event masks. 
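 * (The cached tx_evmask is zeroed and rewritten through
 * nxge_init_txdma_channel_event_mask(), the same initialization the
 * normal start path in nxge_txdma_start_channel() performs.)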
3360 */ 3361 tx_ring_p->tx_evmask.value = 0; 3362 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3363 &tx_ring_p->tx_evmask); 3364 if (status != NXGE_OK) 3365 goto fail; 3366 3367 tx_ring_p->wr_index_wrap = B_FALSE; 3368 tx_ring_p->wr_index = 0; 3369 tx_ring_p->rd_index = 0; 3370 3371 /* 3372 * Load TXDMA descriptors, buffers, mailbox, 3373 * initialise the DMA channels and 3374 * enable each DMA channel. 3375 */ 3376 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3377 status = nxge_enable_txdma_channel(nxgep, channel, 3378 tx_ring_p, tx_mbox_p); 3379 MUTEX_EXIT(&tx_ring_p->lock); 3380 if (status != NXGE_OK) 3381 goto fail; 3382 3383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3384 "Recovery Successful, TxDMAChannel#%d Restored", 3385 channel)); 3386 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3387 3388 return (NXGE_OK); 3389 3390 fail: 3391 MUTEX_EXIT(&tx_ring_p->lock); 3392 3393 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3394 "nxge_txdma_fatal_err_recover (channel %d): " 3395 "failed to recover this txdma channel", channel)); 3396 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3397 3398 return (status); 3399 } 3400 3401 /* 3402 * nxge_tx_port_fatal_err_recover 3403 * 3404 * Attempt to recover from a fatal port error. 3405 * 3406 * Arguments: 3407 * nxgep 3408 * 3409 * Notes: 3410 * How would a guest do this? 3411 * 3412 * NPI/NXGE function calls: 3413 * 3414 * Registers accessed: 3415 * 3416 * Context: 3417 * Service domain 3418 */ 3419 nxge_status_t 3420 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3421 { 3422 nxge_grp_set_t *set = &nxgep->tx_set; 3423 nxge_channel_t tdc; 3424 3425 tx_ring_t *ring; 3426 tx_mbox_t *mailbox; 3427 3428 npi_handle_t handle; 3429 nxge_status_t status = NXGE_OK; 3430 npi_status_t rs; 3431 3432 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "Recovering from TxPort error...")); 3435 3436 if (isLDOMguest(nxgep)) { 3437 return (NXGE_OK); 3438 } 3439 3440 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3441 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3442 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3443 return (NXGE_ERROR); 3444 } 3445 3446 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3447 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3448 "<== nxge_tx_port_fatal_err_recover: " 3449 "NULL ring pointer(s)")); 3450 return (NXGE_ERROR); 3451 } 3452 3453 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3454 if ((1 << tdc) & set->owned.map) { 3455 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3456 if (ring) 3457 MUTEX_ENTER(&ring->lock); 3458 } 3459 } 3460 3461 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3462 3463 /* 3464 * Stop all the TDCs owned by us. 3465 * (The shared TDCs will have been stopped by their owners.) 3466 */ 3467 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3468 if ((1 << tdc) & set->owned.map) { 3469 ring = nxgep->tx_rings->rings[tdc]; 3470 if (ring) { 3471 rs = npi_txdma_channel_control 3472 (handle, TXDMA_STOP, tdc); 3473 if (rs != NPI_SUCCESS) { 3474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3475 "nxge_tx_port_fatal_err_recover " 3476 "(channel %d): stop failed ", tdc)); 3477 goto fail; 3478 } 3479 } 3480 } 3481 } 3482 3483 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3484 3485 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3486 if ((1 << tdc) & set->owned.map) { 3487 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3488 if (ring) { 3489 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3490 } 3491 } 3492 } 3493 3494 /* 3495 * Reset all the TDCs. 
3496 */ 3497 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3498 3499 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3500 if ((1 << tdc) & set->owned.map) { 3501 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3502 if (ring) { 3503 if ((rs = npi_txdma_channel_control 3504 (handle, TXDMA_RESET, tdc)) 3505 != NPI_SUCCESS) { 3506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3507 "nxge_tx_port_fatal_err_recover " 3508 "(channel %d) reset channel " 3509 "failed 0x%x", tdc, rs)); 3510 goto fail; 3511 } 3512 } 3513 /* 3514 * Reset the tail (kick) register to 0. 3515 * (Hardware will not reset it. Tx overflow fatal 3516 * error if tail is not set to 0 after reset! 3517 */ 3518 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3519 } 3520 } 3521 3522 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3523 3524 /* Restart all the TDCs */ 3525 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3526 if ((1 << tdc) & set->owned.map) { 3527 ring = nxgep->tx_rings->rings[tdc]; 3528 if (ring) { 3529 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3530 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3531 ring, mailbox); 3532 ring->tx_evmask.value = 0; 3533 /* 3534 * Initialize the event masks. 3535 */ 3536 status = nxge_init_txdma_channel_event_mask 3537 (nxgep, tdc, &ring->tx_evmask); 3538 3539 ring->wr_index_wrap = B_FALSE; 3540 ring->wr_index = 0; 3541 ring->rd_index = 0; 3542 3543 if (status != NXGE_OK) 3544 goto fail; 3545 if (status != NXGE_OK) 3546 goto fail; 3547 } 3548 } 3549 } 3550 3551 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3552 3553 /* Re-enable all the TDCs */ 3554 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3555 if ((1 << tdc) & set->owned.map) { 3556 ring = nxgep->tx_rings->rings[tdc]; 3557 if (ring) { 3558 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3559 status = nxge_enable_txdma_channel(nxgep, tdc, 3560 ring, mailbox); 3561 if (status != NXGE_OK) 3562 goto fail; 3563 } 3564 } 3565 } 3566 3567 /* 3568 * Unlock all the TDCs. 3569 */ 3570 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3571 if ((1 << tdc) & set->owned.map) { 3572 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3573 if (ring) 3574 MUTEX_EXIT(&ring->lock); 3575 } 3576 } 3577 3578 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3579 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3580 3581 return (NXGE_OK); 3582 3583 fail: 3584 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3585 if ((1 << tdc) & set->owned.map) { 3586 ring = nxgep->tx_rings->rings[tdc]; 3587 if (ring) 3588 MUTEX_EXIT(&ring->lock); 3589 } 3590 } 3591 3592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3593 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3594 3595 return (status); 3596 } 3597 3598 /* 3599 * nxge_txdma_inject_err 3600 * 3601 * Inject an error into a TDC. 3602 * 3603 * Arguments: 3604 * nxgep 3605 * err_id The error to inject. 3606 * chan The channel to inject into. 3607 * 3608 * Notes: 3609 * This is called from nxge_main.c:nxge_err_inject() 3610 * Has this ioctl ever been used? 
3611 * 3612 * NPI/NXGE function calls: 3613 * npi_txdma_inj_par_error_get() 3614 * npi_txdma_inj_par_error_set() 3615 * 3616 * Registers accessed: 3617 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3618 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3619 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3620 * 3621 * Context: 3622 * Service domain 3623 */ 3624 void 3625 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3626 { 3627 tdmc_intr_dbg_t tdi; 3628 tdmc_inj_par_err_t par_err; 3629 uint32_t value; 3630 npi_handle_t handle; 3631 3632 switch (err_id) { 3633 3634 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3635 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3636 /* Clear error injection source for parity error */ 3637 (void) npi_txdma_inj_par_error_get(handle, &value); 3638 par_err.value = value; 3639 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3640 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3641 3642 par_err.bits.ldw.inject_parity_error = (1 << chan); 3643 (void) npi_txdma_inj_par_error_get(handle, &value); 3644 par_err.value = value; 3645 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3646 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3647 (unsigned long long)par_err.value); 3648 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3649 break; 3650 3651 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3652 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3653 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3654 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3655 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3656 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3657 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3658 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3659 chan, &tdi.value); 3660 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3661 tdi.bits.ldw.pref_buf_par_err = 1; 3662 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3663 tdi.bits.ldw.mbox_err = 1; 3664 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3665 tdi.bits.ldw.nack_pref = 1; 3666 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3667 tdi.bits.ldw.nack_pkt_rd = 1; 3668 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3669 tdi.bits.ldw.pkt_size_err = 1; 3670 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3671 tdi.bits.ldw.tx_ring_oflow = 1; 3672 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3673 tdi.bits.ldw.conf_part_err = 1; 3674 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3675 tdi.bits.ldw.pkt_part_err = 1; 3676 #if defined(__i386) 3677 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3678 tdi.value); 3679 #else 3680 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3681 tdi.value); 3682 #endif 3683 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3684 chan, tdi.value); 3685 3686 break; 3687 } 3688 } 3689