/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 64;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;
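/*
 * Note (editorial, assumed from the driver's transmit path): the globals
 * above are driver tunables (patchable via /etc/system).  The *_thresh
 * externs are packet-length thresholds used when choosing how to map a
 * message block for DMA -- roughly, fragments shorter than
 * nxge_bcopy_thresh are bcopy'd into pre-mapped transmit buffers and
 * larger fragments are bound directly (see nxge_start()).
 */
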
/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern void nxge_tx_ring_task(void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
	p_tx_ring_t ring_p, uint16_t channel);

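/*
 * nxge_init_txdma_channels
 *
 *	Map and start every TDC owned by this instance, walking the
 *	logical transmit groups.  If any channel fails to come up, every
 *	channel added so far is removed again.
 *
 * Arguments:
 * 	nxgep
 *
 * NPI/NXGE function calls:
 *	nxge_grp_dc_add()
 *	nxge_grp_dc_remove()
 *
 * Context:
 *	Any domain
 */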
nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int i, tdc, count;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_TX, tdc)))
						goto init_txdma_channels_exit;
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
	return (NXGE_OK);

init_txdma_channels_exit:
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_TX, tdc);
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	return (NXGE_ERROR);

}

nxge_status_t
nxge_init_txdma_channel(
	p_nxge_t nxge,
	int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

	status = nxge_map_txdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channel: status 0x%x", status));
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	status = nxge_txdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		(void) nxge_unmap_txdma_channel(nxge, channel);
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	if (!nxge->statsp->tdc_ksp[channel])
		nxge_setup_tdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

	return (status);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "nxge_uninit_txdma_channels: no channels"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

	if (nxgep->statsp->tdc_ksp[channel]) {
		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
		nxgep->statsp->tdc_ksp[channel] = 0;
	}

	(void) nxge_txdma_stop_channel(nxgep, channel);
	nxge_unmap_txdma_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channel"));
}

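/*
 * nxge_setup_dma_common
 *
 *	Carve a sub-area out of a larger DMA area: <dest_p> is given the
 *	next <entries> * <size> bytes of <src_p>, and <src_p>'s kernel
 *	address, length and DMA cookie are advanced past that region.
 *
 * Arguments:
 * 	dest_p		The area to initialize (e.g. a descriptor ring).
 * 	src_p		The area to carve from.
 * 	entries		Number of blocks in the new area.
 * 	size		Size in bytes of each block.
 *
 * Context:
 *	Any domain
 */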
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t tsize;
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *	Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it.  A Tx overflow fatal
	 * error results if the tail is not set to 0 after a reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *	Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 * 	mask_p		The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
	p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Write a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to update.
 * 	reg_data	The TX_CS value to write.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
	uint64_t reg_data)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *	Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
			return (NXGE_ERROR);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

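/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune internal transmit packet header that precedes
 *	the frame.  The caller is expected to have zeroed the header.  If
 *	<fill_len> is set, only the total transfer length is recorded;
 *	otherwise the Ethernet/VLAN/LLC, IPv4/IPv6 and TCP/UDP headers are
 *	parsed to set the pad count, L3/L4 start and stuff offsets, and the
 *	checksum-enable bits.
 *
 * Arguments:
 * 	mp		The message (without the transmit header).
 * 	fill_len	Only record the total transfer length.
 * 	l4_cksum	Request L4 (TCP/UDP) checksum offload.
 * 	pkt_len		Total transfer length of the packet.
 * 	npads		Number of pad bytes added to the packet.
 * 	pkthdrp		Where the header is built.
 * 	start_offset	L4 checksum start offset from the stack.
 * 	stuff_offset	L4 checksum stuff offset from the stack.
 *
 * Context:
 *	Any domain
 */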
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
	boolean_t l4_cksum, int pkt_len, uint8_t npads,
	p_tx_pkt_hdr_all_t pkthdrp,
	t_uscalar_t start_offset,
	t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t hdrp;
	p_mblk_t nmp;
	uint64_t tmp;
	size_t mblk_len;
	size_t iph_len;
	size_t hdrs_size;
	uint8_t hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t *cursor;
	uint8_t *ip_buf;
	uint16_t eth_type;
	uint8_t ipproto;
	boolean_t is_vlan = B_FALSE;
	size_t eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t *up;
				uint16_t cksum;
				t_uscalar_t stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
				    "cksum 0x%x "
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP "
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

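/*
 * nxge_tx_pkt_nmblocks
 *
 *	Estimate how many transmit descriptors (gather pointers) a message
 *	will need.  Message blocks longer than the 4K hardware transfer
 *	limit are split with dupb(), and if the chain would exceed the 15
 *	gather pointers the hardware allows, the remainder is pulled up
 *	into a single block with msgpullup().
 *
 * Arguments:
 * 	mp		The message to scan (may be modified as above).
 * 	tot_xfer_len_p	Returns the total number of bytes to transfer.
 *
 * Returns:
 *	The number of blocks (descriptors) needed, or 0 on allocation
 *	failure.
 *
 * Context:
 *	Any domain
 */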
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

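/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors that the hardware has completed: read
 *	the hardware ring head (TX_RING_HDL), then free or unbind the
 *	message blocks and DMA handles for every descriptor between the
 *	software read index and the hardware head, updating the channel
 *	statistics as it goes.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to reclaim.
 * 	nmblks		Number of descriptors the caller is about to use
 *			(0 when called purely to clean up).
 *
 * Returns:
 *	B_TRUE if, after reclaiming, the ring has room for <nmblks> more
 *	descriptors (keeping a TX_FULL_MARK reserve); B_FALSE otherwise.
 *
 * Context:
 *	Any domain
 */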
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t status = B_TRUE;
	p_nxge_dma_common_t tx_desc_dma_p;
	nxge_dma_common_t desc_area;
	p_tx_desc_t tx_desc_ring_vp;
	p_tx_desc_t tx_desc_p;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t r_tx_desc;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint32_t pkt_len;
	uint_t tx_rd_index;
	uint16_t head_index, tail_index;
	uint8_t tdc;
	boolean_t head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
		if (status) {
			(void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 *	Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_tx_intr: interface not started or initialized"));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		nxge_tx_ring_task((void *)tx_ring_p);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

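/*
 * nxge_txdma_channel_disable
 *
 *	Disable a TDC, waiting for the stop-done bit.  If the channel does
 *	not stop, inject a NACK-on-prefetch error so that it will, and try
 *	the disable once more.
 *
 * Arguments:
 * 	nxge
 * 	channel		The channel to disable.
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_disable()
 *	npi_txdma_inj_int_error_set()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */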
npi_status_t
nxge_txdma_channel_disable(
	nxge_t *nxge,
	int channel)
{
	npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
	npi_status_t rs;
	tdmc_intr_dbg_t intr_dbg;

	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not present, then force
	 * an error so TXC will stop.
	 * All channels bound to this port need to be stopped
	 * and reset after injecting an interrupt error.
	 */
	rs = npi_txdma_channel_disable(handle, channel);
	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
	    "==> nxge_txdma_channel_disable(%d) "
	    "rs 0x%x", channel, rs));
	if (rs != NPI_SUCCESS) {
		/* Inject any error */
		intr_dbg.value = 0;
		intr_dbg.bits.ldw.nack_pref = 1;
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop failed 0x%x) "
		    "(inject err)", channel, rs));
		(void) npi_txdma_inj_int_error_set(
		    handle, channel, &intr_dbg);
		rs = npi_txdma_channel_disable(handle, channel);
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop again 0x%x) "
		    "(after inject err)",
		    channel, rs));
	}

	return (rs);
}

/*
 * nxge_txdma_hw_mode
 *
 *	Toggle all TDCs on (enable) or off (disable).
 *
 * Arguments:
 * 	nxgep
 * 	enable	Enable or disable a TDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_enable(TX_CS)
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	nxge_grp_set_t *set = &nxgep->tx_set;

	npi_handle_t handle;
	nxge_status_t status;
	npi_status_t rs;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_mode: enable mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: not initialized"));
		return (NXGE_ERROR);
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
		return (NXGE_ERROR);
	}

	/* Enable or disable all of the TDCs owned by us. */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_mode: channel %d", tdc));
				if (enable) {
					rs = npi_txdma_channel_enable
					    (handle, tdc);
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (enable) rs 0x%x",
					    tdc, rs));
				} else {
					rs = nxge_txdma_channel_disable
					    (nxgep, tdc);
				}
			}
		}
	}

	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* stop the transmit dma channels */
	(void) npi_txdma_channel_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

/*
 * nxge_txdma_stop_inj_err
 *
 *	Stop a TDC.  If at first we don't succeed, inject an error.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_disable()
 *	npi_txdma_inj_int_error_set()
 * #if defined(NXGE_DEBUG)
 *	nxge_txdma_regs_dump_channels(nxgep);
 * #endif
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not set, then create
	 * an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (status == NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    "injecting error", channel, rs));
	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK ", channel));
		return (status);
	}

#if	defined(NXGE_DEBUG)
	nxge_txdma_regs_dump_channels(nxgep);
#endif
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
	return (status);
}

/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_fixup_txdma_rings: channel %d",
				    tdc));
				nxge_txdma_fixup_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_kick: channel %d", tdc));
				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*
 * nxge_check_tx_hang
 *
 *	Check the state of all TDCs belonging to nxgep.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_hw.c:nxge_check_hw_state().
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		goto nxge_check_tx_hang_exit;
	}

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}

nxge_check_tx_hang_exit:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 *	Determine whether any TDC owned by this instance appears to be hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;
	boolean_t shared;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		/*
		 * Grab the shared state of the TDC.
		 */
		if (isLDOMservice(nxgep)) {
			nxge_hio_data_t *nhd =
			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;

			MUTEX_ENTER(&nhd->lock);
			shared = nxgep->tdc_is_shared[tdc];
			MUTEX_EXIT(&nhd->lock);
		} else {
			shared = B_FALSE;
		}

		/*
		 * Now, only check the channels that we own and
		 * that are not shared.
		 */
		if (((1 << tdc) & set->owned.map) && !shared) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

/*
 * nxge_txdma_channel_hung
 *
 *	Determine whether a TDC appears to be hung.
 *
 * Arguments:
 * 	nxgep
 * 	ring		<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t head_index, tail_index;
	boolean_t head_wrap, tail_wrap;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint_t tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index = tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}

/*
 * nxge_fixup_hung_txdma_rings
 *
 *	Attempt to recover each hung TDC owned by this instance.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
				    tdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}

/*
 * nxge_txdma_fixup_hung_channel
 *
 *	'Fix' a hung TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to fix.
 *
 * Notes:
 *	Called by nxge_fixup_hung_txdma_rings()
 *
 *	1. Reclaim the TDC.
 *	2. Disable the TDC.
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_reclaim()
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
}
*/ 2070 if (!isLDOMguest(nxgep)) { 2071 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2072 "==> nxge_txdma_regs_dump_channels:" 2073 " FZC TDC %d", tdc)); 2074 (void) npi_txc_dump_tdc_fzc_regs 2075 (handle, tdc); 2076 } 2077 nxge_txdma_regs_dump(nxgep, tdc); 2078 } 2079 } 2080 } 2081 2082 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 2083 } 2084 2085 void 2086 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 2087 { 2088 npi_handle_t handle; 2089 tx_ring_hdl_t hdl; 2090 tx_ring_kick_t kick; 2091 tx_cs_t cs; 2092 txc_control_t control; 2093 uint32_t bitmap = 0; 2094 uint32_t burst = 0; 2095 uint32_t bytes = 0; 2096 dma_log_page_t cfg; 2097 2098 printf("\n\tfunc # %d tdc %d ", 2099 nxgep->function_num, channel); 2100 cfg.page_num = 0; 2101 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2102 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2103 printf("\n\tlog page func %d valid page 0 %d", 2104 cfg.func_num, cfg.valid); 2105 cfg.page_num = 1; 2106 (void) npi_txdma_log_page_get(handle, channel, &cfg); 2107 printf("\n\tlog page func %d valid page 1 %d", 2108 cfg.func_num, cfg.valid); 2109 2110 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 2111 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 2112 printf("\n\thead value is 0x%0llx", 2113 (long long)hdl.value); 2114 printf("\n\thead index %d", hdl.bits.ldw.head); 2115 printf("\n\tkick value is 0x%0llx", 2116 (long long)kick.value); 2117 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 2118 2119 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 2120 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 2121 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 2122 2123 (void) npi_txc_control(handle, OP_GET, &control); 2124 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 2125 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 2126 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 2127 2128 printf("\n\tTXC port control 0x%0llx", 2129 (long long)control.value); 2130 printf("\n\tTXC port bitmap 0x%x", bitmap); 2131 printf("\n\tTXC max burst %d", burst); 2132 printf("\n\tTXC bytes xmt %d\n", bytes); 2133 2134 { 2135 ipp_status_t status; 2136 2137 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 2138 #if defined(__i386) 2139 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value); 2140 #else 2141 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value); 2142 #endif 2143 } 2144 } 2145 2146 /* 2147 * nxge_tdc_hvio_setup 2148 * 2149 * Record the physical (I/O) base addresses and sizes of this channel's transmit data and control buffers in the ring, for later use in programming the NIU logical pages through the hypervisor (NIU_LP_WORKAROUND). 2150 * 2151 * Arguments: 2152 * nxgep 2153 * channel The channel to map. 2154 * 2155 * Notes: 2156 * 2157 * NPI/NXGE function calls: 2158 * na 2159 * 2160 * Context: 2161 * Service domain
2162 */ 2163 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2164 static void 2165 nxge_tdc_hvio_setup( 2166 nxge_t *nxgep, int channel) 2167 { 2168 nxge_dma_common_t *data; 2169 nxge_dma_common_t *control; 2170 tx_ring_t *ring; 2171 2172 ring = nxgep->tx_rings->rings[channel]; 2173 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2174 2175 ring->hv_set = B_FALSE; 2176 2177 ring->hv_tx_buf_base_ioaddr_pp = 2178 (uint64_t)data->orig_ioaddr_pp; 2179 ring->hv_tx_buf_ioaddr_size = 2180 (uint64_t)data->orig_alength; 2181 2182 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2183 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2184 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2185 ring->hv_tx_buf_base_ioaddr_pp, 2186 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2187 data->ioaddr_pp, data->orig_vatopa, 2188 data->orig_alength, data->orig_alength)); 2189 2190 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2191 2192 ring->hv_tx_cntl_base_ioaddr_pp = 2193 (uint64_t)control->orig_ioaddr_pp; 2194 ring->hv_tx_cntl_ioaddr_size = 2195 (uint64_t)control->orig_alength; 2196 2197 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2198 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2199 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2200 ring->hv_tx_cntl_base_ioaddr_pp, 2201 control->orig_ioaddr_pp, control->orig_vatopa, 2202 ring->hv_tx_cntl_ioaddr_size, 2203 control->orig_alength, control->orig_alength)); 2204 } 2205 #endif 2206 2207 static nxge_status_t 2208 nxge_map_txdma(p_nxge_t nxgep, int channel) 2209 { 2210 nxge_dma_common_t **pData; 2211 nxge_dma_common_t **pControl; 2212 tx_ring_t **pRing, *ring; 2213 tx_mbox_t **mailbox; 2214 uint32_t num_chunks; 2215 2216 nxge_status_t status = NXGE_OK; 2217 2218 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2219 2220 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2221 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2222 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2223 "<== nxge_map_txdma: buf not allocated")); 2224 return (NXGE_ERROR); 2225 } 2226 } 2227 2228 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2229 return (NXGE_ERROR); 2230 2231 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2232 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2233 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2234 pRing = &nxgep->tx_rings->rings[channel]; 2235 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2236 2237 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2238 "tx_rings $%p tx_desc_rings $%p", 2239 nxgep->tx_rings, nxgep->tx_rings->rings)); 2240 2241 /* 2242 * Map descriptors from the buffer pools for <channel>. 2243 */ 2244 2245 /* 2246 * Set up and prepare buffer blocks, descriptors 2247 * and mailbox. 
2248 */ 2249 status = nxge_map_txdma_channel(nxgep, channel, 2250 pData, pRing, num_chunks, pControl, mailbox); 2251 if (status != NXGE_OK) { 2252 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2253 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2254 "returned 0x%x", 2255 nxgep, channel, status)); 2256 return (status); 2257 } 2258 2259 ring = *pRing; 2260 2261 ring->index = (uint16_t)channel; 2262 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2263 2264 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2265 if (isLDOMguest(nxgep)) { 2266 (void) nxge_tdc_lp_conf(nxgep, channel); 2267 } else { 2268 nxge_tdc_hvio_setup(nxgep, channel); 2269 } 2270 #endif 2271 2272 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2273 "(status 0x%x channel %d)", status, channel)); 2274 2275 return (status); 2276 } 2277 2278 static nxge_status_t 2279 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2280 p_nxge_dma_common_t *dma_buf_p, 2281 p_tx_ring_t *tx_desc_p, 2282 uint32_t num_chunks, 2283 p_nxge_dma_common_t *dma_cntl_p, 2284 p_tx_mbox_t *tx_mbox_p) 2285 { 2286 int status = NXGE_OK; 2287 2288 /* 2289 * Set up and prepare buffer blocks, descriptors 2290 * and mailbox. 2291 */ 2292 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2293 "==> nxge_map_txdma_channel (channel %d)", channel)); 2294 /* 2295 * Transmit buffer blocks 2296 */ 2297 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2298 dma_buf_p, tx_desc_p, num_chunks); 2299 if (status != NXGE_OK) { 2300 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2301 "==> nxge_map_txdma_channel (channel %d): " 2302 "map buffer failed 0x%x", channel, status)); 2303 goto nxge_map_txdma_channel_exit; 2304 } 2305 2306 /* 2307 * Transmit block ring, and mailbox. 2308 */ 2309 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2310 tx_mbox_p); 2311 2312 goto nxge_map_txdma_channel_exit; 2313 2314 nxge_map_txdma_channel_fail1: 2315 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2316 "==> nxge_map_txdma_channel: unmap buf" 2317 "(status 0x%x channel %d)", 2318 status, channel)); 2319 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2320 2321 nxge_map_txdma_channel_exit: 2322 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2323 "<== nxge_map_txdma_channel: " 2324 "(status 0x%x channel %d)", 2325 status, channel)); 2326 2327 return (status); 2328 } 2329 2330 /*ARGSUSED*/ 2331 static void 2332 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2333 { 2334 tx_ring_t *ring; 2335 tx_mbox_t *mailbox; 2336 2337 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2338 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2339 /* 2340 * unmap tx block ring, and mailbox. 2341 */ 2342 ring = nxgep->tx_rings->rings[channel]; 2343 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2344 2345 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2346 2347 /* unmap buffer blocks */ 2348 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2349 2350 nxge_free_txb(nxgep, channel); 2351 2352 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2353 } 2354 2355 /* 2356 * nxge_map_txdma_channel_cfg_ring 2357 * 2358 * Map a TDC into our kernel space. 2359 * This function allocates all of the per-channel data structures. 2360 * 2361 * Arguments: 2362 * nxgep 2363 * dma_channel The channel to map. 2364 * dma_cntl_p 2365 * tx_ring_p dma_channel's transmit ring 2366 * tx_mbox_p dma_channel's mailbox 2367 * 2368 * Notes: 2369 * 2370 * NPI/NXGE function calls: 2371 * nxge_setup_dma_common() 2372 * 2373 * Registers accessed: 2374 * none. 
2375 * 2376 * Context: 2377 * Any domain 2378 */ 2379 /*ARGSUSED*/ 2380 static void 2381 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2382 p_nxge_dma_common_t *dma_cntl_p, 2383 p_tx_ring_t tx_ring_p, 2384 p_tx_mbox_t *tx_mbox_p) 2385 { 2386 p_tx_mbox_t mboxp; 2387 p_nxge_dma_common_t cntl_dmap; 2388 p_nxge_dma_common_t dmap; 2389 p_tx_rng_cfig_t tx_ring_cfig_p; 2390 p_tx_ring_kick_t tx_ring_kick_p; 2391 p_tx_cs_t tx_cs_p; 2392 p_tx_dma_ent_msk_t tx_evmask_p; 2393 p_txdma_mbh_t mboxh_p; 2394 p_txdma_mbl_t mboxl_p; 2395 uint64_t tx_desc_len; 2396 2397 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2398 "==> nxge_map_txdma_channel_cfg_ring")); 2399 2400 cntl_dmap = *dma_cntl_p; 2401 2402 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2403 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2404 sizeof (tx_desc_t)); 2405 /* 2406 * Zero out transmit ring descriptors. 2407 */ 2408 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2409 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2410 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2411 tx_cs_p = &(tx_ring_p->tx_cs); 2412 tx_evmask_p = &(tx_ring_p->tx_evmask); 2413 tx_ring_cfig_p->value = 0; 2414 tx_ring_kick_p->value = 0; 2415 tx_cs_p->value = 0; 2416 tx_evmask_p->value = 0; 2417 2418 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2419 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2420 dma_channel, 2421 dmap->dma_cookie.dmac_laddress)); 2422 2423 tx_ring_cfig_p->value = 0; 2424 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2425 tx_ring_cfig_p->value = 2426 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2427 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2428 2429 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2430 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2431 dma_channel, 2432 tx_ring_cfig_p->value)); 2433 2434 tx_cs_p->bits.ldw.rst = 1; 2435 2436 /* Map in mailbox */ 2437 mboxp = (p_tx_mbox_t) 2438 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2439 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2440 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2441 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2442 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2443 mboxh_p->value = mboxl_p->value = 0; 2444 2445 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2446 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2447 dmap->dma_cookie.dmac_laddress)); 2448 2449 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2450 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2451 2452 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2453 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2454 2455 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2456 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2457 dmap->dma_cookie.dmac_laddress)); 2458 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2459 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2460 "mbox $%p", 2461 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2462 tx_ring_p->page_valid.value = 0; 2463 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2464 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2465 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2466 tx_ring_p->page_hdl.value = 0; 2467 2468 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2469 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2470 2471 tx_ring_p->max_burst.value = 0; 2472 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2473 2474 *tx_mbox_p = mboxp; 2475 2476 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2477 "<== nxge_map_txdma_channel_cfg_ring")); 2478 } 2479 2480 
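/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * how nxge_map_txdma_channel_cfg_ring() above packs the descriptor-ring
 * and mailbox DMA addresses into register images.  The mask and shift
 * macros are the same ones used above; the helper names and the
 * standalone form are illustrative, not part of the driver.
 */
#if 0
/*
 * TX_RNG_CFIG image: masked ring base address plus the scaled ring
 * length (tx_ring_size >> 3, exactly as computed above).
 */
static uint64_t
example_tx_rng_cfig(uint64_t desc_ioaddr, uint32_t tx_ring_size)
{
	uint64_t tx_desc_len = (uint64_t)(tx_ring_size >> 3);

	return ((desc_ioaddr & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT));
}

/*
 * Mailbox DMA address split across the mailbox-high (MBH) and
 * mailbox-low (MBL) register fields, mirroring the mboxh_p/mboxl_p
 * assignments above.
 */
static void
example_tx_mbox_split(uint64_t mbox_ioaddr, uint64_t *mbh, uint64_t *mbl)
{
	*mbh = (mbox_ioaddr >> TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK;
	*mbl = (mbox_ioaddr & TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT;
}
#endif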
/*ARGSUSED*/ 2481 static void 2482 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2483 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2484 { 2485 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2486 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2487 tx_ring_p->tdc)); 2488 2489 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2490 2491 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2492 "<== nxge_unmap_txdma_channel_cfg_ring")); 2493 } 2494 2495 /* 2496 * nxge_map_txdma_channel_buf_ring 2497 * 2498 * 2499 * Arguments: 2500 * nxgep 2501 * channel The channel to map. 2502 * dma_buf_p 2503 * tx_desc_p channel's descriptor ring 2504 * num_chunks 2505 * 2506 * Notes: 2507 * 2508 * NPI/NXGE function calls: 2509 * nxge_setup_dma_common() 2510 * 2511 * Registers accessed: 2512 * none. 2513 * 2514 * Context: 2515 * Any domain 2516 */ 2517 static nxge_status_t 2518 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2519 p_nxge_dma_common_t *dma_buf_p, 2520 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2521 { 2522 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2523 p_nxge_dma_common_t dmap; 2524 nxge_os_dma_handle_t tx_buf_dma_handle; 2525 p_tx_ring_t tx_ring_p; 2526 p_tx_msg_t tx_msg_ring; 2527 nxge_status_t status = NXGE_OK; 2528 int ddi_status = DDI_SUCCESS; 2529 int i, j, index; 2530 uint32_t size, bsize; 2531 uint32_t nblocks, nmsgs; 2532 char qname[TASKQ_NAMELEN]; 2533 2534 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2535 "==> nxge_map_txdma_channel_buf_ring")); 2536 2537 dma_bufp = tmp_bufp = *dma_buf_p; 2538 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2539 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2540 "chunks bufp $%p", 2541 channel, num_chunks, dma_bufp)); 2542 2543 nmsgs = 0; 2544 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2545 nmsgs += tmp_bufp->nblocks; 2546 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2547 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2548 "bufp $%p nblocks %d nmsgs %d", 2549 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2550 } 2551 if (!nmsgs) { 2552 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2553 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2554 "no msg blocks", 2555 channel)); 2556 status = NXGE_ERROR; 2557 goto nxge_map_txdma_channel_buf_ring_exit; 2558 } 2559 2560 tx_ring_p = (p_tx_ring_t) 2561 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2562 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2563 (void *)nxgep->interrupt_cookie); 2564 2565 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 2566 tx_ring_p->tx_ring_busy = B_FALSE; 2567 tx_ring_p->nxgep = nxgep; 2568 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL; 2569 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d", 2570 nxgep->instance, channel); 2571 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1, 2572 TASKQ_DEFAULTPRI, 0); 2573 if (tx_ring_p->taskq == NULL) { 2574 goto nxge_map_txdma_channel_buf_ring_fail1; 2575 } 2576 2577 /* 2578 * Allocate transmit message rings and handles for packets 2579 * not to be copied to premapped buffers. 
2580 */ 2581 size = nmsgs * sizeof (tx_msg_t); 2582 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2583 for (i = 0; i < nmsgs; i++) { 2584 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2585 DDI_DMA_DONTWAIT, 0, 2586 &tx_msg_ring[i].dma_handle); 2587 if (ddi_status != DDI_SUCCESS) { 2588 status |= NXGE_DDI_FAILED; 2589 break; 2590 } 2591 } 2592 if (i < nmsgs) { 2593 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2594 "Allocate handles failed.")); 2595 goto nxge_map_txdma_channel_buf_ring_fail1; 2596 } 2597 2598 tx_ring_p->tdc = channel; 2599 tx_ring_p->tx_msg_ring = tx_msg_ring; 2600 tx_ring_p->tx_ring_size = nmsgs; 2601 tx_ring_p->num_chunks = num_chunks; 2602 if (!nxge_tx_intr_thres) { 2603 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2604 } 2605 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2606 tx_ring_p->rd_index = 0; 2607 tx_ring_p->wr_index = 0; 2608 tx_ring_p->ring_head.value = 0; 2609 tx_ring_p->ring_kick_tail.value = 0; 2610 tx_ring_p->descs_pending = 0; 2611 2612 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2613 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2614 "actual tx desc max %d nmsgs %d " 2615 "(config nxge_tx_ring_size %d)", 2616 channel, tx_ring_p->tx_ring_size, nmsgs, 2617 nxge_tx_ring_size)); 2618 2619 /* 2620 * Map in buffers from the buffer pool. 2621 */ 2622 index = 0; 2623 bsize = dma_bufp->block_size; 2624 2625 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2626 "dma_bufp $%p tx_rng_p $%p " 2627 "tx_msg_rng_p $%p bsize %d", 2628 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2629 2630 tx_buf_dma_handle = dma_bufp->dma_handle; 2631 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2632 bsize = dma_bufp->block_size; 2633 nblocks = dma_bufp->nblocks; 2634 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2635 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2636 "size %d dma_bufp $%p", 2637 i, sizeof (nxge_dma_common_t), dma_bufp)); 2638 2639 for (j = 0; j < nblocks; j++) { 2640 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2641 dmap = &tx_msg_ring[index++].buf_dma; 2642 #ifdef TX_MEM_DEBUG 2643 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2644 "==> nxge_map_txdma_channel_buf_ring: j %d" 2645 "dmap $%p", i, dmap)); 2646 #endif 2647 nxge_setup_dma_common(dmap, dma_bufp, 1, 2648 bsize); 2649 } 2650 } 2651 2652 if (i < num_chunks) { 2653 status = NXGE_ERROR; 2654 goto nxge_map_txdma_channel_buf_ring_fail1; 2655 } 2656 2657 *tx_desc_p = tx_ring_p; 2658 2659 goto nxge_map_txdma_channel_buf_ring_exit; 2660 2661 nxge_map_txdma_channel_buf_ring_fail1: 2662 if (tx_ring_p->taskq) { 2663 ddi_taskq_destroy(tx_ring_p->taskq); 2664 tx_ring_p->taskq = NULL; 2665 } 2666 2667 index--; 2668 for (; index >= 0; index--) { 2669 if (tx_msg_ring[index].dma_handle != NULL) { 2670 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2671 } 2672 } 2673 MUTEX_DESTROY(&tx_ring_p->lock); 2674 KMEM_FREE(tx_msg_ring, size); 2675 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2676 2677 status = NXGE_ERROR; 2678 2679 nxge_map_txdma_channel_buf_ring_exit: 2680 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2681 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2682 2683 return (status); 2684 } 2685 2686 /*ARGSUSED*/ 2687 static void 2688 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2689 { 2690 p_tx_msg_t tx_msg_ring; 2691 p_tx_msg_t tx_msg_p; 2692 int i; 2693 2694 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2695 "==> nxge_unmap_txdma_channel_buf_ring")); 2696 if (tx_ring_p == NULL) { 2697 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2698 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 
2699 return; 2700 } 2701 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2702 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2703 tx_ring_p->tdc)); 2704 2705 tx_msg_ring = tx_ring_p->tx_msg_ring; 2706 2707 /* 2708 * Since the serialization thread, timer thread and 2709 * interrupt thread can all call the transmit reclaim, 2710 * the unmapping function needs to acquire the lock 2711 * to free those buffers which were transmitted 2712 * by the hardware already. 2713 */ 2714 MUTEX_ENTER(&tx_ring_p->lock); 2715 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2716 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2717 "channel %d", 2718 tx_ring_p->tdc)); 2719 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2720 2721 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2722 tx_msg_p = &tx_msg_ring[i]; 2723 if (tx_msg_p->tx_message != NULL) { 2724 freemsg(tx_msg_p->tx_message); 2725 tx_msg_p->tx_message = NULL; 2726 } 2727 } 2728 2729 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2730 if (tx_msg_ring[i].dma_handle != NULL) { 2731 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2732 } 2733 tx_msg_ring[i].dma_handle = NULL; 2734 } 2735 2736 MUTEX_EXIT(&tx_ring_p->lock); 2737 2738 if (tx_ring_p->taskq) { 2739 ddi_taskq_destroy(tx_ring_p->taskq); 2740 tx_ring_p->taskq = NULL; 2741 } 2742 2743 MUTEX_DESTROY(&tx_ring_p->lock); 2744 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2745 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2746 2747 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2748 "<== nxge_unmap_txdma_channel_buf_ring")); 2749 } 2750 2751 static nxge_status_t 2752 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2753 { 2754 p_tx_rings_t tx_rings; 2755 p_tx_ring_t *tx_desc_rings; 2756 p_tx_mbox_areas_t tx_mbox_areas_p; 2757 p_tx_mbox_t *tx_mbox_p; 2758 nxge_status_t status = NXGE_OK; 2759 2760 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2761 2762 tx_rings = nxgep->tx_rings; 2763 if (tx_rings == NULL) { 2764 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2765 "<== nxge_txdma_hw_start: NULL ring pointer")); 2766 return (NXGE_ERROR); 2767 } 2768 tx_desc_rings = tx_rings->rings; 2769 if (tx_desc_rings == NULL) { 2770 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2771 "<== nxge_txdma_hw_start: NULL ring pointers")); 2772 return (NXGE_ERROR); 2773 } 2774 2775 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2776 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2777 2778 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2779 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2780 2781 status = nxge_txdma_start_channel(nxgep, channel, 2782 (p_tx_ring_t)tx_desc_rings[channel], 2783 (p_tx_mbox_t)tx_mbox_p[channel]); 2784 if (status != NXGE_OK) { 2785 goto nxge_txdma_hw_start_fail1; 2786 } 2787 2788 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2789 "tx_rings $%p rings $%p", 2790 nxgep->tx_rings, nxgep->tx_rings->rings)); 2791 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2792 "tx_rings $%p tx_desc_rings $%p", 2793 nxgep->tx_rings, tx_desc_rings)); 2794 2795 goto nxge_txdma_hw_start_exit; 2796 2797 nxge_txdma_hw_start_fail1: 2798 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2799 "==> nxge_txdma_hw_start: disable " 2800 "(status 0x%x channel %d)", status, channel)); 2801 2802 nxge_txdma_hw_start_exit: 2803 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2804 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2805 2806 return (status); 2807 } 2808 2809 /* 2810 * nxge_txdma_start_channel 2811 * 2812 * Start a TDC. 2813 * 2814 * Arguments: 2815 * nxgep 2816 * channel The channel to start. 
2817 * tx_ring_p channel's transmit descriptor ring. 2818 * tx_mbox_p channel' smailbox. 2819 * 2820 * Notes: 2821 * 2822 * NPI/NXGE function calls: 2823 * nxge_reset_txdma_channel() 2824 * nxge_init_txdma_channel_event_mask() 2825 * nxge_enable_txdma_channel() 2826 * 2827 * Registers accessed: 2828 * none directly (see functions above). 2829 * 2830 * Context: 2831 * Any domain 2832 */ 2833 static nxge_status_t 2834 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2835 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2836 2837 { 2838 nxge_status_t status = NXGE_OK; 2839 2840 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2841 "==> nxge_txdma_start_channel (channel %d)", channel)); 2842 /* 2843 * TXDMA/TXC must be in stopped state. 2844 */ 2845 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2846 2847 /* 2848 * Reset TXDMA channel 2849 */ 2850 tx_ring_p->tx_cs.value = 0; 2851 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2852 status = nxge_reset_txdma_channel(nxgep, channel, 2853 tx_ring_p->tx_cs.value); 2854 if (status != NXGE_OK) { 2855 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2856 "==> nxge_txdma_start_channel (channel %d)" 2857 " reset channel failed 0x%x", channel, status)); 2858 goto nxge_txdma_start_channel_exit; 2859 } 2860 2861 /* 2862 * Initialize the TXDMA channel specific FZC control 2863 * configurations. These FZC registers are pertaining 2864 * to each TX channel (i.e. logical pages). 2865 */ 2866 if (!isLDOMguest(nxgep)) { 2867 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2868 tx_ring_p, tx_mbox_p); 2869 if (status != NXGE_OK) { 2870 goto nxge_txdma_start_channel_exit; 2871 } 2872 } 2873 2874 /* 2875 * Initialize the event masks. 2876 */ 2877 tx_ring_p->tx_evmask.value = 0; 2878 status = nxge_init_txdma_channel_event_mask(nxgep, 2879 channel, &tx_ring_p->tx_evmask); 2880 if (status != NXGE_OK) { 2881 goto nxge_txdma_start_channel_exit; 2882 } 2883 2884 /* 2885 * Load TXDMA descriptors, buffers, mailbox, 2886 * initialise the DMA channels and 2887 * enable each DMA channel. 2888 */ 2889 status = nxge_enable_txdma_channel(nxgep, channel, 2890 tx_ring_p, tx_mbox_p); 2891 if (status != NXGE_OK) { 2892 goto nxge_txdma_start_channel_exit; 2893 } 2894 2895 nxge_txdma_start_channel_exit: 2896 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2897 2898 return (status); 2899 } 2900 2901 /* 2902 * nxge_txdma_stop_channel 2903 * 2904 * Stop a TDC. 2905 * 2906 * Arguments: 2907 * nxgep 2908 * channel The channel to stop. 2909 * tx_ring_p channel's transmit descriptor ring. 2910 * tx_mbox_p channel' smailbox. 2911 * 2912 * Notes: 2913 * 2914 * NPI/NXGE function calls: 2915 * nxge_txdma_stop_inj_err() 2916 * nxge_reset_txdma_channel() 2917 * nxge_init_txdma_channel_event_mask() 2918 * nxge_init_txdma_channel_cntl_stat() 2919 * nxge_disable_txdma_channel() 2920 * 2921 * Registers accessed: 2922 * none directly (see functions above). 2923 * 2924 * Context: 2925 * Any domain 2926 */ 2927 /*ARGSUSED*/ 2928 static nxge_status_t 2929 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2930 { 2931 p_tx_ring_t tx_ring_p; 2932 int status = NXGE_OK; 2933 2934 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2935 "==> nxge_txdma_stop_channel: channel %d", channel)); 2936 2937 /* 2938 * Stop (disable) TXDMA and TXC (if stop bit is set 2939 * and STOP_N_GO bit not set, the TXDMA reset state will 2940 * not be set if reset TXDMA. 
2941 */ 2942 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2943 2944 tx_ring_p = nxgep->tx_rings->rings[channel]; 2945 2946 /* 2947 * Reset TXDMA channel 2948 */ 2949 tx_ring_p->tx_cs.value = 0; 2950 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2951 status = nxge_reset_txdma_channel(nxgep, channel, 2952 tx_ring_p->tx_cs.value); 2953 if (status != NXGE_OK) { 2954 goto nxge_txdma_stop_channel_exit; 2955 } 2956 2957 #ifdef HARDWARE_REQUIRED 2958 /* Set up the interrupt event masks. */ 2959 tx_ring_p->tx_evmask.value = 0; 2960 status = nxge_init_txdma_channel_event_mask(nxgep, 2961 channel, &tx_ring_p->tx_evmask); 2962 if (status != NXGE_OK) { 2963 goto nxge_txdma_stop_channel_exit; 2964 } 2965 2966 /* Initialize the DMA control and status register */ 2967 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2968 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2969 tx_ring_p->tx_cs.value); 2970 if (status != NXGE_OK) { 2971 goto nxge_txdma_stop_channel_exit; 2972 } 2973 2974 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2975 2976 /* Disable channel */ 2977 status = nxge_disable_txdma_channel(nxgep, channel, 2978 tx_ring_p, tx_mbox_p); 2979 if (status != NXGE_OK) { 2980 goto nxge_txdma_start_channel_exit; 2981 } 2982 2983 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2984 "==> nxge_txdma_stop_channel: event done")); 2985 2986 #endif 2987 2988 nxge_txdma_stop_channel_exit: 2989 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2990 return (status); 2991 } 2992 2993 /* 2994 * nxge_txdma_get_ring 2995 * 2996 * Get the ring for a TDC. 2997 * 2998 * Arguments: 2999 * nxgep 3000 * channel 3001 * 3002 * Notes: 3003 * 3004 * NPI/NXGE function calls: 3005 * 3006 * Registers accessed: 3007 * 3008 * Context: 3009 * Any domain 3010 */ 3011 static p_tx_ring_t 3012 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 3013 { 3014 nxge_grp_set_t *set = &nxgep->tx_set; 3015 int tdc; 3016 3017 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 3018 3019 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3020 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3021 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 3022 goto return_null; 3023 } 3024 3025 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3026 if ((1 << tdc) & set->owned.map) { 3027 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3028 if (ring) { 3029 if (channel == ring->tdc) { 3030 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3031 "<== nxge_txdma_get_ring: " 3032 "tdc %d ring $%p", tdc, ring)); 3033 return (ring); 3034 } 3035 } 3036 } 3037 } 3038 3039 return_null: 3040 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 3041 "ring not found")); 3042 3043 return (NULL); 3044 } 3045 3046 /* 3047 * nxge_txdma_get_mbox 3048 * 3049 * Get the mailbox for a TDC. 
3050 * 3051 * Arguments: 3052 * nxgep 3053 * channel 3054 * 3055 * Notes: 3056 * 3057 * NPI/NXGE function calls: 3058 * 3059 * Registers accessed: 3060 * 3061 * Context: 3062 * Any domain 3063 */ 3064 static p_tx_mbox_t 3065 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3066 { 3067 nxge_grp_set_t *set = &nxgep->tx_set; 3068 int tdc; 3069 3070 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3071 3072 if (nxgep->tx_mbox_areas_p == 0 || 3073 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3074 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3075 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3076 goto return_null; 3077 } 3078 3079 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3080 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3081 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3082 goto return_null; 3083 } 3084 3085 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3086 if ((1 << tdc) & set->owned.map) { 3087 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3088 if (ring) { 3089 if (channel == ring->tdc) { 3090 tx_mbox_t *mailbox = nxgep-> 3091 tx_mbox_areas_p-> 3092 txmbox_areas_p[tdc]; 3093 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3094 "<== nxge_txdma_get_mbox: tdc %d " 3095 "ring $%p", tdc, mailbox)); 3096 return (mailbox); 3097 } 3098 } 3099 } 3100 } 3101 3102 return_null: 3103 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3104 "mailbox not found")); 3105 3106 return (NULL); 3107 } 3108 3109 /* 3110 * nxge_tx_err_evnts 3111 * 3112 * Recover a TDC. 3113 * 3114 * Arguments: 3115 * nxgep 3116 * index The index to the TDC ring. 3117 * ldvp Used to get the channel number ONLY. 3118 * cs A copy of the bits from TX_CS. 3119 * 3120 * Notes: 3121 * Calling tree: 3122 * nxge_tx_intr() 3123 * 3124 * NPI/NXGE function calls: 3125 * npi_txdma_ring_error_get() 3126 * npi_txdma_inj_par_error_get() 3127 * nxge_txdma_fatal_err_recover() 3128 * 3129 * Registers accessed: 3130 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3131 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3132 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3133 * 3134 * Context: 3135 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
3136 */ 3137 /*ARGSUSED*/ 3138 static nxge_status_t 3139 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3140 { 3141 npi_handle_t handle; 3142 npi_status_t rs; 3143 uint8_t channel; 3144 p_tx_ring_t *tx_rings; 3145 p_tx_ring_t tx_ring_p; 3146 p_nxge_tx_ring_stats_t tdc_stats; 3147 boolean_t txchan_fatal = B_FALSE; 3148 nxge_status_t status = NXGE_OK; 3149 tdmc_inj_par_err_t par_err; 3150 uint32_t value; 3151 3152 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3153 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3154 channel = ldvp->channel; 3155 3156 tx_rings = nxgep->tx_rings->rings; 3157 tx_ring_p = tx_rings[index]; 3158 tdc_stats = tx_ring_p->tdc_stats; 3159 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3160 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3161 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3162 if ((rs = npi_txdma_ring_error_get(handle, channel, 3163 &tdc_stats->errlog)) != NPI_SUCCESS) 3164 return (NXGE_ERROR | rs); 3165 } 3166 3167 if (cs.bits.ldw.mbox_err) { 3168 tdc_stats->mbox_err++; 3169 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3170 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3171 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3172 "==> nxge_tx_err_evnts(channel %d): " 3173 "fatal error: mailbox", channel)); 3174 txchan_fatal = B_TRUE; 3175 } 3176 if (cs.bits.ldw.pkt_size_err) { 3177 tdc_stats->pkt_size_err++; 3178 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3179 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3180 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3181 "==> nxge_tx_err_evnts(channel %d): " 3182 "fatal error: pkt_size_err", channel)); 3183 txchan_fatal = B_TRUE; 3184 } 3185 if (cs.bits.ldw.tx_ring_oflow) { 3186 tdc_stats->tx_ring_oflow++; 3187 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3188 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3190 "==> nxge_tx_err_evnts(channel %d): " 3191 "fatal error: tx_ring_oflow", channel)); 3192 txchan_fatal = B_TRUE; 3193 } 3194 if (cs.bits.ldw.pref_buf_par_err) { 3195 tdc_stats->pre_buf_par_err++; 3196 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3197 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3198 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3199 "==> nxge_tx_err_evnts(channel %d): " 3200 "fatal error: pre_buf_par_err", channel)); 3201 /* Clear error injection source for parity error */ 3202 (void) npi_txdma_inj_par_error_get(handle, &value); 3203 par_err.value = value; 3204 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3205 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3206 txchan_fatal = B_TRUE; 3207 } 3208 if (cs.bits.ldw.nack_pref) { 3209 tdc_stats->nack_pref++; 3210 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3211 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3212 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3213 "==> nxge_tx_err_evnts(channel %d): " 3214 "fatal error: nack_pref", channel)); 3215 txchan_fatal = B_TRUE; 3216 } 3217 if (cs.bits.ldw.nack_pkt_rd) { 3218 tdc_stats->nack_pkt_rd++; 3219 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3220 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3222 "==> nxge_tx_err_evnts(channel %d): " 3223 "fatal error: nack_pkt_rd", channel)); 3224 txchan_fatal = B_TRUE; 3225 } 3226 if (cs.bits.ldw.conf_part_err) { 3227 tdc_stats->conf_part_err++; 3228 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3229 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3231 "==> 
nxge_tx_err_evnts(channel %d): " 3232 "fatal error: config_partition_err", channel)); 3233 txchan_fatal = B_TRUE; 3234 } 3235 if (cs.bits.ldw.pkt_prt_err) { 3236 tdc_stats->pkt_part_err++; 3237 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3238 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3239 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3240 "==> nxge_tx_err_evnts(channel %d): " 3241 "fatal error: pkt_prt_err", channel)); 3242 txchan_fatal = B_TRUE; 3243 } 3244 3245 /* Clear error injection source in case this is an injected error */ 3246 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3247 3248 if (txchan_fatal) { 3249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3250 " nxge_tx_err_evnts: " 3251 " fatal error on channel %d cs 0x%llx\n", 3252 channel, cs.value)); 3253 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3254 tx_ring_p); 3255 if (status == NXGE_OK) { 3256 FM_SERVICE_RESTORED(nxgep); 3257 } 3258 } 3259 3260 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3261 3262 return (status); 3263 } 3264 3265 static nxge_status_t 3266 nxge_txdma_fatal_err_recover( 3267 p_nxge_t nxgep, 3268 uint16_t channel, 3269 p_tx_ring_t tx_ring_p) 3270 { 3271 npi_handle_t handle; 3272 npi_status_t rs = NPI_SUCCESS; 3273 p_tx_mbox_t tx_mbox_p; 3274 nxge_status_t status = NXGE_OK; 3275 3276 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3278 "Recovering from TxDMAChannel#%d error...", channel)); 3279 3280 /* 3281 * Stop the dma channel waits for the stop done. 3282 * If the stop done bit is not set, then create 3283 * an error. 3284 */ 3285 3286 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3287 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3288 MUTEX_ENTER(&tx_ring_p->lock); 3289 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3290 if (rs != NPI_SUCCESS) { 3291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3292 "==> nxge_txdma_fatal_err_recover (channel %d): " 3293 "stop failed ", channel)); 3294 goto fail; 3295 } 3296 3297 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3298 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3299 3300 /* 3301 * Reset TXDMA channel 3302 */ 3303 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3304 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3305 NPI_SUCCESS) { 3306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3307 "==> nxge_txdma_fatal_err_recover (channel %d)" 3308 " reset channel failed 0x%x", channel, rs)); 3309 goto fail; 3310 } 3311 3312 /* 3313 * Reset the tail (kick) register to 0. 3314 * (Hardware will not reset it. Tx overflow fatal 3315 * error if tail is not set to 0 after reset! 3316 */ 3317 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3318 3319 /* Restart TXDMA channel */ 3320 3321 if (!isLDOMguest(nxgep)) { 3322 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3323 3324 // XXX This is a problem in HIO! 3325 /* 3326 * Initialize the TXDMA channel specific FZC control 3327 * configurations. These FZC registers are pertaining 3328 * to each TX channel (i.e. logical pages). 3329 */ 3330 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3331 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3332 tx_ring_p, tx_mbox_p); 3333 if (status != NXGE_OK) 3334 goto fail; 3335 } 3336 3337 /* 3338 * Initialize the event masks. 
3339 */ 3340 tx_ring_p->tx_evmask.value = 0; 3341 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3342 &tx_ring_p->tx_evmask); 3343 if (status != NXGE_OK) 3344 goto fail; 3345 3346 tx_ring_p->wr_index_wrap = B_FALSE; 3347 tx_ring_p->wr_index = 0; 3348 tx_ring_p->rd_index = 0; 3349 3350 /* 3351 * Load TXDMA descriptors, buffers, mailbox, 3352 * initialise the DMA channels and 3353 * enable each DMA channel. 3354 */ 3355 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3356 status = nxge_enable_txdma_channel(nxgep, channel, 3357 tx_ring_p, tx_mbox_p); 3358 MUTEX_EXIT(&tx_ring_p->lock); 3359 if (status != NXGE_OK) 3360 goto fail; 3361 3362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3363 "Recovery Successful, TxDMAChannel#%d Restored", 3364 channel)); 3365 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3366 3367 return (NXGE_OK); 3368 3369 fail: 3370 MUTEX_EXIT(&tx_ring_p->lock); 3371 3372 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3373 "nxge_txdma_fatal_err_recover (channel %d): " 3374 "failed to recover this txdma channel", channel)); 3375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3376 3377 return (status); 3378 } 3379 3380 /* 3381 * nxge_tx_port_fatal_err_recover 3382 * 3383 * Attempt to recover from a fatal port error. 3384 * 3385 * Arguments: 3386 * nxgep 3387 * 3388 * Notes: 3389 * How would a guest do this? 3390 * 3391 * NPI/NXGE function calls: 3392 * 3393 * Registers accessed: 3394 * 3395 * Context: 3396 * Service domain 3397 */ 3398 nxge_status_t 3399 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3400 { 3401 nxge_grp_set_t *set = &nxgep->tx_set; 3402 nxge_channel_t tdc; 3403 3404 tx_ring_t *ring; 3405 tx_mbox_t *mailbox; 3406 3407 npi_handle_t handle; 3408 nxge_status_t status; 3409 npi_status_t rs; 3410 3411 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3413 "Recovering from TxPort error...")); 3414 3415 if (isLDOMguest(nxgep)) { 3416 return (NXGE_OK); 3417 } 3418 3419 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3420 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3421 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3422 return (NXGE_ERROR); 3423 } 3424 3425 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3426 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3427 "<== nxge_tx_port_fatal_err_recover: " 3428 "NULL ring pointer(s)")); 3429 return (NXGE_ERROR); 3430 } 3431 3432 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3433 if ((1 << tdc) & set->owned.map) { 3434 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3435 if (ring) 3436 MUTEX_ENTER(&ring->lock); 3437 } 3438 } 3439 3440 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3441 3442 /* 3443 * Stop all the TDCs owned by us. 3444 * (The shared TDCs will have been stopped by their owners.) 3445 */ 3446 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3447 if ((1 << tdc) & set->owned.map) { 3448 ring = nxgep->tx_rings->rings[tdc]; 3449 if (ring) { 3450 rs = npi_txdma_channel_control 3451 (handle, TXDMA_STOP, tdc); 3452 if (rs != NPI_SUCCESS) { 3453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3454 "nxge_tx_port_fatal_err_recover " 3455 "(channel %d): stop failed ", tdc)); 3456 goto fail; 3457 } 3458 } 3459 } 3460 } 3461 3462 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3463 3464 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3465 if ((1 << tdc) & set->owned.map) { 3466 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3467 if (ring) { 3468 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3469 } 3470 } 3471 } 3472 3473 /* 3474 * Reset all the TDCs. 
3475 */ 3476 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3477 3478 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3479 if ((1 << tdc) & set->owned.map) { 3480 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3481 if (ring) { 3482 if ((rs = npi_txdma_channel_control 3483 (handle, TXDMA_RESET, tdc)) 3484 != NPI_SUCCESS) { 3485 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3486 "nxge_tx_port_fatal_err_recover " 3487 "(channel %d) reset channel " 3488 "failed 0x%x", tdc, rs)); 3489 goto fail; 3490 } 3491 } 3492 /* 3493 * Reset the tail (kick) register to 0. 3494 * (Hardware will not reset it. Tx overflow fatal 3495 * error if tail is not set to 0 after reset! 3496 */ 3497 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3498 } 3499 } 3500 3501 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3502 3503 /* Restart all the TDCs */ 3504 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3505 if ((1 << tdc) & set->owned.map) { 3506 ring = nxgep->tx_rings->rings[tdc]; 3507 if (ring) { 3508 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3509 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3510 ring, mailbox); 3511 ring->tx_evmask.value = 0; 3512 /* 3513 * Initialize the event masks. 3514 */ 3515 status = nxge_init_txdma_channel_event_mask 3516 (nxgep, tdc, &ring->tx_evmask); 3517 3518 ring->wr_index_wrap = B_FALSE; 3519 ring->wr_index = 0; 3520 ring->rd_index = 0; 3521 3522 if (status != NXGE_OK) 3523 goto fail; 3524 if (status != NXGE_OK) 3525 goto fail; 3526 } 3527 } 3528 } 3529 3530 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3531 3532 /* Re-enable all the TDCs */ 3533 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3534 if ((1 << tdc) & set->owned.map) { 3535 ring = nxgep->tx_rings->rings[tdc]; 3536 if (ring) { 3537 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3538 status = nxge_enable_txdma_channel(nxgep, tdc, 3539 ring, mailbox); 3540 if (status != NXGE_OK) 3541 goto fail; 3542 } 3543 } 3544 } 3545 3546 /* 3547 * Unlock all the TDCs. 3548 */ 3549 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3550 if ((1 << tdc) & set->owned.map) { 3551 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3552 if (ring) 3553 MUTEX_EXIT(&ring->lock); 3554 } 3555 } 3556 3557 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3558 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3559 3560 return (NXGE_OK); 3561 3562 fail: 3563 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3564 if ((1 << tdc) & set->owned.map) { 3565 ring = nxgep->tx_rings->rings[tdc]; 3566 if (ring) 3567 MUTEX_EXIT(&ring->lock); 3568 } 3569 } 3570 3571 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3572 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3573 3574 return (status); 3575 } 3576 3577 /* 3578 * nxge_txdma_inject_err 3579 * 3580 * Inject an error into a TDC. 3581 * 3582 * Arguments: 3583 * nxgep 3584 * err_id The error to inject. 3585 * chan The channel to inject into. 3586 * 3587 * Notes: 3588 * This is called from nxge_main.c:nxge_err_inject() 3589 * Has this ioctl ever been used? 
3590 * 3591 * NPI/NXGE function calls: 3592 * npi_txdma_inj_par_error_get() 3593 * npi_txdma_inj_par_error_set() 3594 * 3595 * Registers accessed: 3596 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3597 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3599 * 3600 * Context: 3601 * Service domain 3602 */ 3603 void 3604 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3605 { 3606 tdmc_intr_dbg_t tdi; 3607 tdmc_inj_par_err_t par_err; 3608 uint32_t value; 3609 npi_handle_t handle; 3610 3611 switch (err_id) { 3612 3613 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3614 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3615 /* Clear any previously armed parity error injection */ 3616 (void) npi_txdma_inj_par_error_get(handle, &value); 3617 par_err.value = value; 3618 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3619 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3620 3621 /* Arm a new parity error injection for this channel */ 3622 (void) npi_txdma_inj_par_error_get(handle, &value); 3623 par_err.value = value; 3624 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3625 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3626 (unsigned long long)par_err.value); 3627 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3628 break; 3629 3630 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3631 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3632 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3633 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3634 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3635 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3636 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3637 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3638 chan, &tdi.value); 3639 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3640 tdi.bits.ldw.pref_buf_par_err = 1; 3641 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3642 tdi.bits.ldw.mbox_err = 1; 3643 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3644 tdi.bits.ldw.nack_pref = 1; 3645 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3646 tdi.bits.ldw.nack_pkt_rd = 1; 3647 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3648 tdi.bits.ldw.pkt_size_err = 1; 3649 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3650 tdi.bits.ldw.tx_ring_oflow = 1; 3651 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3652 tdi.bits.ldw.conf_part_err = 1; 3653 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3654 tdi.bits.ldw.pkt_part_err = 1; 3655 #if defined(__i386) 3656 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3657 tdi.value); 3658 #else 3659 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3660 tdi.value); 3661 #endif 3662 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3663 chan, tdi.value); 3664 3665 break; 3666 } 3667 } 3668
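/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled):
 * the read-modify-write pattern used by nxge_txdma_inject_err() above
 * for TDMC_INJ_PAR_ERR.  The register is shared by all channels, so the
 * per-channel bit is first cleared, to retire any previous injection,
 * and then set again to arm a new one.  example_reg_read() and
 * example_reg_write() are hypothetical stand-ins for
 * npi_txdma_inj_par_error_get()/npi_txdma_inj_par_error_set().
 */
#if 0
extern uint32_t example_reg_read(void);
extern void example_reg_write(uint32_t);

static void
example_inject_parity_error(uint8_t chan)
{
	uint32_t value;

	/* Retire any injection already armed for this channel. */
	value = example_reg_read();
	value &= ~(1U << chan);
	example_reg_write(value);

	/* Arm a new injection for this channel only. */
	value = example_reg_read();
	value |= (1U << chan);
	example_reg_write(value);
}
#endif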