/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 32;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

static nxge_status_t nxge_map_txdma(p_nxge_t);
static void nxge_unmap_txdma(p_nxge_t);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
static void nxge_txdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));

	status = nxge_map_txdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channels: status 0x%x", status));
		return (status);
	}

	status = nxge_txdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_txdma(nxgep);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channels: status 0x%x", status));

	return (NXGE_OK);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));

	nxge_txdma_hw_stop(nxgep);
	nxge_unmap_txdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channels"));
}

void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t	tsize;

	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it. A Tx overflow fatal
	 * error results if the tail is not set to 0 after reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
    boolean_t l4_cksum, int pkt_len, uint8_t npads,
    p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t		nmp;
	uint64_t		tmp;
	size_t			mblk_len;
	size_t			iph_len;
	size_t			hdrs_size;
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t			*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, mblk_len));
	ip_buf = NULL;
	bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM"
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP"
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t	newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t		nmblks;
	ssize_t		len;
	uint_t		pkt_len;
	p_mblk_t	nmp, bmp, tmp;
	uint8_t		*b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t	nsegs;

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				nsegs = 1;
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t		status = B_TRUE;
	p_nxge_dma_common_t	tx_desc_dma_p;
	nxge_dma_common_t	desc_area;
	p_tx_desc_t		tx_desc_ring_vp;
	p_tx_desc_t		tx_desc_p;
	p_tx_desc_t		tx_desc_pp;
	tx_desc_t		r_tx_desc;
	p_tx_msg_t		tx_msg_ring;
	p_tx_msg_t		tx_msg_p;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint32_t		pkt_len;
	uint_t			tx_rd_index;
	uint16_t		head_index, tail_index;
	uint8_t			tdc;
	boolean_t		head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t	tdc_stats;
	int			rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
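				/*
				 * Descriptive note: this buffer carries a
				 * live DDI DMA handle binding; release the
				 * binding before the message block is freed
				 * below.
				 */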
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
		if (status) {
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <=
		    (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t	ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t	nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t	ldgp;
	uint8_t		channel;
	uint32_t	vindex;
	npi_handle_t	handle;
	tx_cs_t		cs;
	p_tx_ring_t	*tx_rings;
	p_tx_ring_t	tx_ring_p;
	npi_status_t	rs = NPI_SUCCESS;
	uint_t		serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t	status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));
	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
			    B_TRUE, ldgp->ldg_timer);
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

nxge_status_t
nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	int		i, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	*tx_desc_rings;
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_mode: enable mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_mode: not initialized"));
		return (NXGE_ERROR);
	}

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL global ring pointer"));
		return (NXGE_ERROR);
	}

	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL rings pointer"));
		return (NXGE_ERROR);
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_txdma_hw_mode: no dma channel allocated"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_desc_rings, ndmas));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_txdma_hw_mode: channel %d", channel));
		if (enable) {
			rs = npi_txdma_channel_enable(handle, channel);
			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_txdma_hw_mode: channel %d (enable) "
			    "rs 0x%x", channel, rs));
		} else {
			/*
			 * Stop the dma channel and wait for the stop-done
			 * bit. If the stop done bit is not set, then force
			 * an error so TXC will stop.
			 * All channels bound to this port need to be stopped
			 * and reset after injecting an interrupt error.
			 */
			rs = npi_txdma_channel_disable(handle, channel);
			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_txdma_hw_mode: channel %d (disable) "
			    "rs 0x%x", channel, rs));
			{
				tdmc_intr_dbg_t		intr_dbg;

				if (rs != NPI_SUCCESS) {
					/* Inject any error */
					intr_dbg.value = 0;
					intr_dbg.bits.ldw.nack_pref = 1;
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (stop failed 0x%x) "
					    "(inject err)", rs, channel));
					(void) npi_txdma_inj_int_error_set(
					    handle, channel, &intr_dbg);
					rs = npi_txdma_channel_disable(handle,
					    channel);
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (stop again 0x%x) "
					    "(after inject err)",
					    rs, channel));
				}
			}
		}
	}

	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* stop the transmit dma channels */
	(void) npi_txdma_channel_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
	npi_handle_t	handle;
	tdmc_intr_dbg_t	intr_dbg;
	int		status;
	npi_status_t	rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
	/*
	 * Stop the dma channel and wait for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (status == NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    "injecting error", channel, rs));
	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
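	/*
	 * After the forced error the disable is expected to complete;
	 * only report a failure if the stop-failed bit is still set.
	 */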
	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK ", channel));
		return (status);
	}

#if	defined(NXGE_DEBUG)
	nxge_txdma_regs_dump_channels(nxgep);
#endif
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
	return (status);
}

void
nxge_hw_start_tx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx"));

	(void) nxge_txdma_hw_start(nxgep);
	(void) nxge_tx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx"));
}

/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

	/*
	 * For each transmit channel, reclaim each descriptor and
	 * free buffers.
	 */
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_fixup_txdma_rings: channel %d", channel));

		nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t	ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_txdma_hw_kick: channel %d", channel));
		nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t	ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    " nxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

int
nxge_txdma_hung(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer"));
		return (B_FALSE);
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: no channel "
		    "allocated"));
		return (B_FALSE);
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL rings pointer"));
		return (B_FALSE);
	}

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		tx_ring_p = tx_rings->rings[index];
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_hung: channel %d", channel));
		if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) {
			return (B_TRUE);
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t	head_index, tail_index;
	boolean_t	head_wrap, tail_wrap;
	npi_handle_t	handle;
	tx_ring_hdl_t	tx_head;
	uint_t		tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index = tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}

/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: no channel "
		    "allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fixup_hung_txdma_rings: channel %d",
		    channel));

		nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t	ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
	npi_handle_t	handle;
	tdmc_intr_dbg_t	intr_dbg;
	int		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: channel "
		    "not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	/* Reclaim descriptors */
	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	MUTEX_EXIT(&ring_p->lock);

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Stop the dma channel and wait for the stop done.
	 * If the stop done bit is not set, then force
	 * an error.
	 */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped again"
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
	    "ring tdc %d passed channel",
	    ring_p->tdc, channel));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
}

/*ARGSUSED*/
void
nxge_reclaim_rings(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: no channel "
		    "allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_reclaim_rings: channel %d",
		    channel));
		tx_ring_p = tx_rings->rings[index];
		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel);
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
}

void
nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_txdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_txdma_dump_fzc_regs(handle);

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: NULL ring"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: "
		    "no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: NULL rings"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		(void) npi_txdma_dump_tdc_regs(handle, channel);
	}

	/* Dump TXC registers */
	(void) npi_txc_dump_fzc_regs(handle);
	(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		(void) npi_txc_dump_tdc_fzc_regs(handle, channel);
	}

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		nxge_txdma_regs_dump(nxgep, channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));

}

void
nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
{
	npi_handle_t	handle;
	tx_ring_hdl_t	hdl;
	tx_ring_kick_t	kick;
	tx_cs_t		cs;
	txc_control_t	control;
	uint32_t	bitmap = 0;
	uint32_t	burst = 0;
	uint32_t	bytes = 0;
	dma_log_page_t	cfg;

	printf("\n\tfunc # %d tdc %d ",
	    nxgep->function_num, channel);
	cfg.page_num = 0;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 0 %d",
	    cfg.func_num, cfg.valid);
	cfg.page_num = 1;
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 1 %d",
	    cfg.func_num, cfg.valid);

	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
	printf("\n\thead value is 0x%0llx",
	    (long long)hdl.value);
	printf("\n\thead index %d", hdl.bits.ldw.head);
	printf("\n\tkick value is 0x%0llx",
	    (long long)kick.value);
	printf("\n\ttail index %d\n", kick.bits.ldw.tail);

	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);

	(void) npi_txc_control(handle, OP_GET, &control);
	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);

	printf("\n\tTXC port control 0x%0llx",
	    (long long)control.value);
	printf("\n\tTXC port bitmap 0x%x", bitmap);
	printf("\n\tTXC max burst %d", burst);
	printf("\n\tTXC bytes xmt %d\n", bytes);

	{
		ipp_status_t status;

		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
	}
}

/*
 * Static functions start here.
 */
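/*
 * nxge_map_txdma: map the transmit descriptor rings, buffer rings and
 * mailboxes for every TX DMA channel from the pre-allocated DMA pools.
 */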
static nxge_status_t
nxge_map_txdma(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_tx_rings_t		tx_rings;
	p_tx_ring_t		*tx_desc_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_common_t	*dma_cntl_p;
	nxge_status_t		status = NXGE_OK;
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	p_nxge_dma_common_t	t_dma_buf_p;
	p_nxge_dma_common_t	t_dma_cntl_p;
#endif

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));

	dma_buf_poolp = nxgep->tx_buf_pool_p;
	dma_cntl_poolp = nxgep->tx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_map_txdma: buf not allocated"));
		return (NXGE_ERROR);
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_map_txdma: no dma allocated"));
		return (NXGE_ERROR);
	}

	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;

	tx_rings = (p_tx_rings_t)
	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
	    sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p",
	    tx_rings, tx_desc_rings));

	tx_mbox_areas_p = (p_tx_mbox_areas_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
	    sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */
	for (i = 0; i < ndmas; i++) {
		/*
		 * Set up and prepare buffer blocks, descriptors
		 * and mailbox.
		 */
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		status = nxge_map_txdma_channel(nxgep, channel,
		    (p_nxge_dma_common_t *)&dma_buf_p[i],
		    (p_tx_ring_t *)&tx_desc_rings[i],
		    dma_buf_poolp->num_chunks[i],
		    (p_nxge_dma_common_t *)&dma_cntl_p[i],
		    (p_tx_mbox_t *)&tx_mbox_p[i]);
		if (status != NXGE_OK) {
			goto nxge_map_txdma_fail1;
		}
		tx_desc_rings[i]->index = (uint16_t)i;
		tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i];

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
			tx_desc_rings[i]->hv_set = B_FALSE;
			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
			t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i];

			tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp =
			    (uint64_t)t_dma_buf_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_buf_ioaddr_size =
			    (uint64_t)t_dma_buf_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_map_txdma_channel: "
			    "hv data buf base io $%p "
			    "size 0x%llx (%d) "
			    "buf base io $%p "
			    "orig vatopa base io $%p "
			    "orig_len 0x%llx (%d)",
			    tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp,
			    tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
			    tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
			    t_dma_buf_p->ioaddr_pp,
			    t_dma_buf_p->orig_vatopa,
			    t_dma_buf_p->orig_alength,
			    t_dma_buf_p->orig_alength));

			tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp =
			    (uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_cntl_ioaddr_size =
			    (uint64_t)t_dma_cntl_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_map_txdma_channel: "
			    "hv cntl base io $%p "
			    "orig ioaddr_pp ($%p) "
			    "orig vatopa ($%p) "
			    "size 0x%llx (%d 0x%x)",
			    tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp,
			    t_dma_cntl_p->orig_ioaddr_pp,
			    t_dma_cntl_p->orig_vatopa,
			    tx_desc_rings[i]->hv_tx_cntl_ioaddr_size,
			    t_dma_cntl_p->orig_alength,
			    t_dma_cntl_p->orig_alength));
		}
#endif
	}

	tx_rings->ndmas = ndmas;
	tx_rings->rings = tx_desc_rings;
	nxgep->tx_rings = tx_rings;
	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
	nxgep->tx_mbox_areas_p = tx_mbox_areas_p;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_map_txdma_exit;

nxge_map_txdma_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma: uninit tx desc "
	    "(status 0x%x channel %d i %d)",
	    nxgep, status, channel, i));
	i--;
	for (; i >= 0; i--) {
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		nxge_unmap_txdma_channel(nxgep, channel,
		    tx_desc_rings[i],
		    tx_mbox_p[i]);
	}

	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

nxge_map_txdma_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma: "
	    "(status 0x%x channel %d)",
	    status, channel));

	return (status);
}

static void
nxge_unmap_txdma(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint8_t			channel;
	p_tx_rings_t		tx_rings;
p_tx_ring_t *tx_desc_rings; 1962 p_tx_mbox_areas_t tx_mbox_areas_p; 1963 p_tx_mbox_t *tx_mbox_p; 1964 p_nxge_dma_pool_t dma_buf_poolp; 1965 1966 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma")); 1967 1968 dma_buf_poolp = nxgep->tx_buf_pool_p; 1969 if (!dma_buf_poolp->buf_allocated) { 1970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1971 "==> nxge_unmap_txdma: buf not allocated")); 1972 return; 1973 } 1974 1975 ndmas = dma_buf_poolp->ndmas; 1976 if (!ndmas) { 1977 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1978 "<== nxge_unmap_txdma: no dma allocated")); 1979 return; 1980 } 1981 1982 tx_rings = nxgep->tx_rings; 1983 tx_desc_rings = tx_rings->rings; 1984 if (tx_rings == NULL) { 1985 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1986 "<== nxge_unmap_txdma: NULL ring pointer")); 1987 return; 1988 } 1989 1990 tx_desc_rings = tx_rings->rings; 1991 if (tx_desc_rings == NULL) { 1992 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1993 "<== nxge_unmap_txdma: NULL ring pointers")); 1994 return; 1995 } 1996 1997 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: " 1998 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1999 tx_rings, tx_desc_rings, ndmas)); 2000 2001 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2002 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2003 2004 for (i = 0; i < ndmas; i++) { 2005 channel = tx_desc_rings[i]->tdc; 2006 (void) nxge_unmap_txdma_channel(nxgep, channel, 2007 (p_tx_ring_t)tx_desc_rings[i], 2008 (p_tx_mbox_t)tx_mbox_p[i]); 2009 } 2010 2011 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 2012 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 2013 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 2014 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 2015 2016 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2017 "<== nxge_unmap_txdma")); 2018 } 2019 2020 static nxge_status_t 2021 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2022 p_nxge_dma_common_t *dma_buf_p, 2023 p_tx_ring_t *tx_desc_p, 2024 uint32_t num_chunks, 2025 p_nxge_dma_common_t *dma_cntl_p, 2026 p_tx_mbox_t *tx_mbox_p) 2027 { 2028 int status = NXGE_OK; 2029 2030 /* 2031 * Set up and prepare buffer blocks, descriptors 2032 * and mailbox. 2033 */ 2034 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2035 "==> nxge_map_txdma_channel (channel %d)", channel)); 2036 /* 2037 * Transmit buffer blocks 2038 */ 2039 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2040 dma_buf_p, tx_desc_p, num_chunks); 2041 if (status != NXGE_OK) { 2042 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2043 "==> nxge_map_txdma_channel (channel %d): " 2044 "map buffer failed 0x%x", channel, status)); 2045 goto nxge_map_txdma_channel_exit; 2046 } 2047 2048 /* 2049 * Transmit block ring, and mailbox. 2050 */ 2051 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2052 tx_mbox_p); 2053 2054 goto nxge_map_txdma_channel_exit; 2055 2056 nxge_map_txdma_channel_fail1: 2057 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2058 "==> nxge_map_txdma_channel: unmap buf" 2059 "(status 0x%x channel %d)", 2060 status, channel)); 2061 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2062 2063 nxge_map_txdma_channel_exit: 2064 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2065 "<== nxge_map_txdma_channel: " 2066 "(status 0x%x channel %d)", 2067 status, channel)); 2068 2069 return (status); 2070 } 2071 2072 /*ARGSUSED*/ 2073 static void 2074 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2075 p_tx_ring_t tx_ring_p, 2076 p_tx_mbox_t tx_mbox_p) 2077 { 2078 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2079 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2080 /* 2081 * unmap tx block ring, and mailbox. 
2082 */ 2083 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, 2084 tx_ring_p, tx_mbox_p); 2085 2086 /* unmap buffer blocks */ 2087 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p); 2088 2089 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2090 } 2091 2092 /*ARGSUSED*/ 2093 static void 2094 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2095 p_nxge_dma_common_t *dma_cntl_p, 2096 p_tx_ring_t tx_ring_p, 2097 p_tx_mbox_t *tx_mbox_p) 2098 { 2099 p_tx_mbox_t mboxp; 2100 p_nxge_dma_common_t cntl_dmap; 2101 p_nxge_dma_common_t dmap; 2102 p_tx_rng_cfig_t tx_ring_cfig_p; 2103 p_tx_ring_kick_t tx_ring_kick_p; 2104 p_tx_cs_t tx_cs_p; 2105 p_tx_dma_ent_msk_t tx_evmask_p; 2106 p_txdma_mbh_t mboxh_p; 2107 p_txdma_mbl_t mboxl_p; 2108 uint64_t tx_desc_len; 2109 2110 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2111 "==> nxge_map_txdma_channel_cfg_ring")); 2112 2113 cntl_dmap = *dma_cntl_p; 2114 2115 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2116 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2117 sizeof (tx_desc_t)); 2118 /* 2119 * Zero out transmit ring descriptors. 2120 */ 2121 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2122 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2123 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2124 tx_cs_p = &(tx_ring_p->tx_cs); 2125 tx_evmask_p = &(tx_ring_p->tx_evmask); 2126 tx_ring_cfig_p->value = 0; 2127 tx_ring_kick_p->value = 0; 2128 tx_cs_p->value = 0; 2129 tx_evmask_p->value = 0; 2130 2131 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2132 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2133 dma_channel, 2134 dmap->dma_cookie.dmac_laddress)); 2135 2136 tx_ring_cfig_p->value = 0; 2137 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2138 tx_ring_cfig_p->value = 2139 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2140 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2141 2142 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2143 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2144 dma_channel, 2145 tx_ring_cfig_p->value)); 2146 2147 tx_cs_p->bits.ldw.rst = 1; 2148 2149 /* Map in mailbox */ 2150 mboxp = (p_tx_mbox_t) 2151 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2152 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2153 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2154 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2155 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2156 mboxh_p->value = mboxl_p->value = 0; 2157 2158 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2159 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2160 dmap->dma_cookie.dmac_laddress)); 2161 2162 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2163 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2164 2165 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2166 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2167 2168 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2169 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2170 dmap->dma_cookie.dmac_laddress)); 2171 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2172 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2173 "mbox $%p", 2174 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2175 tx_ring_p->page_valid.value = 0; 2176 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2177 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2178 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2179 tx_ring_p->page_hdl.value = 0; 2180 2181 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2182 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2183 2184 
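/*
 * Reviewer's note (a hedged sketch, not normative hardware documentation):
 * the ring length written to TX_RNG_CFIG above is tx_ring_size >> 3, i.e.
 * the length is apparently programmed in groups of eight descriptors, and
 * the mailbox DMA address is split across the high/low mailbox registers
 * roughly as:
 *
 *	MBH.mbaddr = (mbox_ioaddr >> TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK
 *	MBL.mbaddr = (mbox_ioaddr & TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT
 *
 * The exact field widths come from the TDC register definitions in the
 * nxge headers.  Logical pages 0 and 1 are left enabled with zeroed
 * mask/value/relocation registers above; the DMA maximum burst size is
 * set to TXC_DMA_MAX_BURST_DEFAULT below.
 */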
tx_ring_p->max_burst.value = 0; 2185 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2186 2187 *tx_mbox_p = mboxp; 2188 2189 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2190 "<== nxge_map_txdma_channel_cfg_ring")); 2191 } 2192 2193 /*ARGSUSED*/ 2194 static void 2195 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2196 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2197 { 2198 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2199 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2200 tx_ring_p->tdc)); 2201 2202 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2203 2204 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2205 "<== nxge_unmap_txdma_channel_cfg_ring")); 2206 } 2207 2208 static nxge_status_t 2209 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2210 p_nxge_dma_common_t *dma_buf_p, 2211 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2212 { 2213 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2214 p_nxge_dma_common_t dmap; 2215 nxge_os_dma_handle_t tx_buf_dma_handle; 2216 p_tx_ring_t tx_ring_p; 2217 p_tx_msg_t tx_msg_ring; 2218 nxge_status_t status = NXGE_OK; 2219 int ddi_status = DDI_SUCCESS; 2220 int i, j, index; 2221 uint32_t size, bsize; 2222 uint32_t nblocks, nmsgs; 2223 2224 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2225 "==> nxge_map_txdma_channel_buf_ring")); 2226 2227 dma_bufp = tmp_bufp = *dma_buf_p; 2228 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2229 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2230 "chunks bufp $%p", 2231 channel, num_chunks, dma_bufp)); 2232 2233 nmsgs = 0; 2234 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2235 nmsgs += tmp_bufp->nblocks; 2236 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2237 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2238 "bufp $%p nblocks %d nmsgs %d", 2239 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2240 } 2241 if (!nmsgs) { 2242 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2243 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2244 "no msg blocks", 2245 channel)); 2246 status = NXGE_ERROR; 2247 goto nxge_map_txdma_channel_buf_ring_exit; 2248 } 2249 2250 tx_ring_p = (p_tx_ring_t) 2251 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2252 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2253 (void *)nxgep->interrupt_cookie); 2254 /* 2255 * Allocate transmit message rings and handles for packets 2256 * not to be copied to premapped buffers. 
2257  */
2258     size = nmsgs * sizeof (tx_msg_t);
2259     tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2260     for (i = 0; i < nmsgs; i++) {
2261         ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2262             DDI_DMA_DONTWAIT, 0,
2263             &tx_msg_ring[i].dma_handle);
2264         if (ddi_status != DDI_SUCCESS) {
2265             status |= NXGE_DDI_FAILED;
2266             break;
2267         }
2268     }
2269     if (i < nmsgs) {
2270         NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "Allocate handles failed."));
2271         goto nxge_map_txdma_channel_buf_ring_fail1;
2272     }
2273 
2274     tx_ring_p->tdc = channel;
2275     tx_ring_p->tx_msg_ring = tx_msg_ring;
2276     tx_ring_p->tx_ring_size = nmsgs;
2277     tx_ring_p->num_chunks = num_chunks;
2278     if (!nxge_tx_intr_thres) {
2279         nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2280     }
2281     tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2282     tx_ring_p->rd_index = 0;
2283     tx_ring_p->wr_index = 0;
2284     tx_ring_p->ring_head.value = 0;
2285     tx_ring_p->ring_kick_tail.value = 0;
2286     tx_ring_p->descs_pending = 0;
2287 
2288     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2289         "==> nxge_map_txdma_channel_buf_ring: channel %d "
2290         "actual tx desc max %d nmsgs %d "
2291         "(config nxge_tx_ring_size %d)",
2292         channel, tx_ring_p->tx_ring_size, nmsgs,
2293         nxge_tx_ring_size));
2294 
2295     /*
2296      * Map in buffers from the buffer pool.
2297      */
2298     index = 0;
2299     bsize = dma_bufp->block_size;
2300 
2301     NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2302         "dma_bufp $%p tx_rng_p $%p "
2303         "tx_msg_rng_p $%p bsize %d",
2304         dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2305 
2306     tx_buf_dma_handle = dma_bufp->dma_handle;
2307     for (i = 0; i < num_chunks; i++, dma_bufp++) {
2308         bsize = dma_bufp->block_size;
2309         nblocks = dma_bufp->nblocks;
2310         NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2311             "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2312             "size %d dma_bufp $%p",
2313             i, sizeof (nxge_dma_common_t), dma_bufp));
2314 
2315         for (j = 0; j < nblocks; j++) {
2316             tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2317             dmap = &tx_msg_ring[index++].buf_dma;
2318 #ifdef TX_MEM_DEBUG
2319             NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2320                 "==> nxge_map_txdma_channel_buf_ring: j %d "
2321                 "dmap $%p", j, dmap));
2322 #endif
2323             nxge_setup_dma_common(dmap, dma_bufp, 1,
2324                 bsize);
2325         }
2326     }
2327 
2328     if (i < num_chunks) {
2329         goto nxge_map_txdma_channel_buf_ring_fail1;
2330     }
2331 
2332     *tx_desc_p = tx_ring_p;
2333 
2334     goto nxge_map_txdma_channel_buf_ring_exit;
2335 
2336 nxge_map_txdma_channel_buf_ring_fail1:
2337     /* Free only the DMA handles that were successfully allocated. */
2338     for (index = 0; index < nmsgs; index++) {
2339         if (tx_msg_ring[index].dma_handle != NULL) {
2340             ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2341         }
2342     }
2343     MUTEX_DESTROY(&tx_ring_p->lock);
2344     KMEM_FREE(tx_msg_ring, size);
2345     KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2346 
2347 nxge_map_txdma_channel_buf_ring_exit:
2348     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2349         "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2350 
2351     return (status);
2352 }
2353 
2354 /*ARGSUSED*/
2355 static void
2356 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2357 {
2358     p_tx_msg_t tx_msg_ring;
2359     p_tx_msg_t tx_msg_p;
2360     int i;
2361 
2362     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2363         "==> nxge_unmap_txdma_channel_buf_ring"));
2364     if (tx_ring_p == NULL) {
2365         NXGE_DEBUG_MSG((nxgep, TX_CTL,
2366             "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2367         return;
2368     }
2369     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2370         "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2371         tx_ring_p->tdc));
2372 
2373
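/*
 * Teardown sketch for the loop below: each tx_msg entry is examined in
 * turn; DVMA-loaded entries are unloaded and the software DVMA write
 * index is unwound, DDI-DMA-bound entries are unbound, and any mblk
 * still held by the entry is freed.  Only after every entry has been
 * quiesced are the per-entry DMA handles freed and the ring structures
 * released.
 */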
tx_msg_ring = tx_ring_p->tx_msg_ring; 2374 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2375 tx_msg_p = &tx_msg_ring[i]; 2376 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2377 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2378 "entry = %d", 2379 i)); 2380 (void) dvma_unload(tx_msg_p->dvma_handle, 2381 0, -1); 2382 tx_msg_p->dvma_handle = NULL; 2383 if (tx_ring_p->dvma_wr_index == 2384 tx_ring_p->dvma_wrap_mask) { 2385 tx_ring_p->dvma_wr_index = 0; 2386 } else { 2387 tx_ring_p->dvma_wr_index++; 2388 } 2389 tx_ring_p->dvma_pending--; 2390 } else if (tx_msg_p->flags.dma_type == 2391 USE_DMA) { 2392 if (ddi_dma_unbind_handle 2393 (tx_msg_p->dma_handle)) { 2394 cmn_err(CE_WARN, "!nxge_unmap_tx_bug_ring: " 2395 "ddi_dma_unbind_handle " 2396 "failed."); 2397 } 2398 } 2399 2400 if (tx_msg_p->tx_message != NULL) { 2401 freemsg(tx_msg_p->tx_message); 2402 tx_msg_p->tx_message = NULL; 2403 } 2404 } 2405 2406 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2407 if (tx_msg_ring[i].dma_handle != NULL) { 2408 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2409 } 2410 } 2411 2412 MUTEX_DESTROY(&tx_ring_p->lock); 2413 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2414 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2415 2416 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2417 "<== nxge_unmap_txdma_channel_buf_ring")); 2418 } 2419 2420 static nxge_status_t 2421 nxge_txdma_hw_start(p_nxge_t nxgep) 2422 { 2423 int i, ndmas; 2424 uint16_t channel; 2425 p_tx_rings_t tx_rings; 2426 p_tx_ring_t *tx_desc_rings; 2427 p_tx_mbox_areas_t tx_mbox_areas_p; 2428 p_tx_mbox_t *tx_mbox_p; 2429 nxge_status_t status = NXGE_OK; 2430 2431 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2432 2433 tx_rings = nxgep->tx_rings; 2434 if (tx_rings == NULL) { 2435 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2436 "<== nxge_txdma_hw_start: NULL ring pointer")); 2437 return (NXGE_ERROR); 2438 } 2439 tx_desc_rings = tx_rings->rings; 2440 if (tx_desc_rings == NULL) { 2441 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2442 "<== nxge_txdma_hw_start: NULL ring pointers")); 2443 return (NXGE_ERROR); 2444 } 2445 2446 ndmas = tx_rings->ndmas; 2447 if (!ndmas) { 2448 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2449 "<== nxge_txdma_hw_start: no dma channel allocated")); 2450 return (NXGE_ERROR); 2451 } 2452 2453 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2454 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2455 tx_rings, tx_desc_rings, ndmas)); 2456 2457 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2458 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2459 2460 for (i = 0; i < ndmas; i++) { 2461 channel = tx_desc_rings[i]->tdc, 2462 status = nxge_txdma_start_channel(nxgep, channel, 2463 (p_tx_ring_t)tx_desc_rings[i], 2464 (p_tx_mbox_t)tx_mbox_p[i]); 2465 if (status != NXGE_OK) { 2466 goto nxge_txdma_hw_start_fail1; 2467 } 2468 } 2469 2470 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2471 "tx_rings $%p rings $%p", 2472 nxgep->tx_rings, nxgep->tx_rings->rings)); 2473 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2474 "tx_rings $%p tx_desc_rings $%p", 2475 nxgep->tx_rings, tx_desc_rings)); 2476 2477 goto nxge_txdma_hw_start_exit; 2478 2479 nxge_txdma_hw_start_fail1: 2480 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2481 "==> nxge_txdma_hw_start: disable " 2482 "(status 0x%x channel %d i %d)", status, channel, i)); 2483 for (; i >= 0; i--) { 2484 channel = tx_desc_rings[i]->tdc, 2485 (void) nxge_txdma_stop_channel(nxgep, channel, 2486 (p_tx_ring_t)tx_desc_rings[i], 2487 (p_tx_mbox_t)tx_mbox_p[i]); 2488 } 2489 2490 nxge_txdma_hw_start_exit: 2491 
NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2492 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2493 2494 return (status); 2495 } 2496 2497 static void 2498 nxge_txdma_hw_stop(p_nxge_t nxgep) 2499 { 2500 int i, ndmas; 2501 uint16_t channel; 2502 p_tx_rings_t tx_rings; 2503 p_tx_ring_t *tx_desc_rings; 2504 p_tx_mbox_areas_t tx_mbox_areas_p; 2505 p_tx_mbox_t *tx_mbox_p; 2506 2507 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop")); 2508 2509 tx_rings = nxgep->tx_rings; 2510 if (tx_rings == NULL) { 2511 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2512 "<== nxge_txdma_hw_stop: NULL ring pointer")); 2513 return; 2514 } 2515 tx_desc_rings = tx_rings->rings; 2516 if (tx_desc_rings == NULL) { 2517 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2518 "<== nxge_txdma_hw_stop: NULL ring pointers")); 2519 return; 2520 } 2521 2522 ndmas = tx_rings->ndmas; 2523 if (!ndmas) { 2524 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2525 "<== nxge_txdma_hw_stop: no dma channel allocated")); 2526 return; 2527 } 2528 2529 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2530 "tx_rings $%p tx_desc_rings $%p", 2531 tx_rings, tx_desc_rings)); 2532 2533 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2534 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2535 2536 for (i = 0; i < ndmas; i++) { 2537 channel = tx_desc_rings[i]->tdc; 2538 (void) nxge_txdma_stop_channel(nxgep, channel, 2539 (p_tx_ring_t)tx_desc_rings[i], 2540 (p_tx_mbox_t)tx_mbox_p[i]); 2541 } 2542 2543 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2544 "tx_rings $%p tx_desc_rings $%p", 2545 tx_rings, tx_desc_rings)); 2546 2547 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop")); 2548 } 2549 2550 static nxge_status_t 2551 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2552 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2553 2554 { 2555 nxge_status_t status = NXGE_OK; 2556 2557 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2558 "==> nxge_txdma_start_channel (channel %d)", channel)); 2559 /* 2560 * TXDMA/TXC must be in stopped state. 2561 */ 2562 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2563 2564 /* 2565 * Reset TXDMA channel 2566 */ 2567 tx_ring_p->tx_cs.value = 0; 2568 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2569 status = nxge_reset_txdma_channel(nxgep, channel, 2570 tx_ring_p->tx_cs.value); 2571 if (status != NXGE_OK) { 2572 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2573 "==> nxge_txdma_start_channel (channel %d)" 2574 " reset channel failed 0x%x", channel, status)); 2575 goto nxge_txdma_start_channel_exit; 2576 } 2577 2578 /* 2579 * Initialize the TXDMA channel specific FZC control 2580 * configurations. These FZC registers are pertaining 2581 * to each TX channel (i.e. logical pages). 2582 */ 2583 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2584 tx_ring_p, tx_mbox_p); 2585 if (status != NXGE_OK) { 2586 goto nxge_txdma_start_channel_exit; 2587 } 2588 2589 /* 2590 * Initialize the event masks. 2591 */ 2592 tx_ring_p->tx_evmask.value = 0; 2593 status = nxge_init_txdma_channel_event_mask(nxgep, 2594 channel, &tx_ring_p->tx_evmask); 2595 if (status != NXGE_OK) { 2596 goto nxge_txdma_start_channel_exit; 2597 } 2598 2599 /* 2600 * Load TXDMA descriptors, buffers, mailbox, 2601 * initialise the DMA channels and 2602 * enable each DMA channel. 
2603  */
2604     status = nxge_enable_txdma_channel(nxgep, channel,
2605         tx_ring_p, tx_mbox_p);
2606     if (status != NXGE_OK) {
2607         goto nxge_txdma_start_channel_exit;
2608     }
2609 
2610 nxge_txdma_start_channel_exit:
2611     NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2612 
2613     return (status);
2614 }
2615 
2616 /*ARGSUSED*/
2617 static nxge_status_t
2618 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel,
2619     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2620 {
2621     nxge_status_t status = NXGE_OK;
2622 
2623     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2624         "==> nxge_txdma_stop_channel: channel %d", channel));
2625 
2626     /*
2627      * Stop (disable) the TXDMA channel and TXC.  (If the stop bit is
2628      * set and the STOP_N_GO bit is not set, the TXDMA reset state
2629      * will not be set when the channel is reset.)
2630      */
2631     (void) nxge_txdma_stop_inj_err(nxgep, channel);
2632 
2633     /*
2634      * Reset TXDMA channel
2635      */
2636     tx_ring_p->tx_cs.value = 0;
2637     tx_ring_p->tx_cs.bits.ldw.rst = 1;
2638     status = nxge_reset_txdma_channel(nxgep, channel,
2639         tx_ring_p->tx_cs.value);
2640     if (status != NXGE_OK) {
2641         goto nxge_txdma_stop_channel_exit;
2642     }
2643 
2644 #ifdef HARDWARE_REQUIRED
2645     /* Set up the interrupt event masks. */
2646     tx_ring_p->tx_evmask.value = 0;
2647     status = nxge_init_txdma_channel_event_mask(nxgep,
2648         channel, &tx_ring_p->tx_evmask);
2649     if (status != NXGE_OK) {
2650         goto nxge_txdma_stop_channel_exit;
2651     }
2652 
2653     /* Initialize the DMA control and status register */
2654     tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2655     status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2656         tx_ring_p->tx_cs.value);
2657     if (status != NXGE_OK) {
2658         goto nxge_txdma_stop_channel_exit;
2659     }
2660 
2661     /* Disable channel */
2662     status = nxge_disable_txdma_channel(nxgep, channel,
2663         tx_ring_p, tx_mbox_p);
2664     if (status != NXGE_OK) {
2665         goto nxge_txdma_stop_channel_exit;
2666     }
2667 
2668     NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2669         "==> nxge_txdma_stop_channel: event done"));
2670 
2671 #endif
2672 
2673 nxge_txdma_stop_channel_exit:
2674     NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
2675     return (status);
2676 }
2677 
2678 static p_tx_ring_t
2679 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
2680 {
2681     int index, ndmas;
2682     uint16_t tdc;
2683     p_tx_rings_t tx_rings;
2684 
2685     NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
2686 
2687     tx_rings = nxgep->tx_rings;
2688     if (tx_rings == NULL) {
2689         NXGE_DEBUG_MSG((nxgep, TX_CTL,
2690             "<== nxge_txdma_get_ring: NULL ring pointer"));
2691         return (NULL);
2692     }
2693 
2694     ndmas = tx_rings->ndmas;
2695     if (!ndmas) {
2696         NXGE_DEBUG_MSG((nxgep, TX_CTL,
2697             "<== nxge_txdma_get_ring: no channel allocated"));
2698         return (NULL);
2699     }
2700 
2701     if (tx_rings->rings == NULL) {
2702         NXGE_DEBUG_MSG((nxgep, TX_CTL,
2703             "<== nxge_txdma_get_ring: NULL rings pointer"));
2704         return (NULL);
2705     }
2706 
2707     NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: "
2708         "tx_rings $%p tx_desc_rings $%p ndmas %d",
2709         tx_rings, tx_rings->rings, ndmas));
2710 
2711     for (index = 0; index < ndmas; index++) {
2712         tdc = tx_rings->rings[index]->tdc;
2713         NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2714             "==> nxge_txdma_get_ring: channel %d", tdc));
2715         if (channel == tdc) {
2716             NXGE_DEBUG_MSG((nxgep, TX_CTL,
2717                 "<== nxge_txdma_get_ring: tdc %d "
2718                 "ring $%p",
2719                 tdc, tx_rings->rings[index]));
2720             return (p_tx_ring_t)(tx_rings->rings[index]);
2721         }
2722     }
2723 
2724     NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring"));
2725     return (NULL);
2726 }
2727 
2728
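/*
 * Usage sketch (hypothetical caller, not part of this file): the routine
 * above (nxge_txdma_get_ring) and nxge_txdma_get_mbox below both scan
 * tx_rings->rings[] linearly for a matching TDC number, so a caller that
 * needs to (re)start a single channel would typically do something like:
 *
 *	p_tx_ring_t ring = nxge_txdma_get_ring(nxgep, channel);
 *	p_tx_mbox_t mbox = nxge_txdma_get_mbox(nxgep, channel);
 *	if (ring != NULL && mbox != NULL)
 *		(void) nxge_txdma_start_channel(nxgep, channel, ring, mbox);
 */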
static p_tx_mbox_t 2729 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 2730 { 2731 int index, tdc, ndmas; 2732 p_tx_rings_t tx_rings; 2733 p_tx_mbox_areas_t tx_mbox_areas_p; 2734 p_tx_mbox_t *tx_mbox_p; 2735 2736 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 2737 2738 tx_rings = nxgep->tx_rings; 2739 if (tx_rings == NULL) { 2740 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2741 "<== nxge_txdma_get_mbox: NULL ring pointer")); 2742 return (NULL); 2743 } 2744 2745 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2746 if (tx_mbox_areas_p == NULL) { 2747 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2748 "<== nxge_txdma_get_mbox: NULL mbox pointer")); 2749 return (NULL); 2750 } 2751 2752 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2753 2754 ndmas = tx_rings->ndmas; 2755 if (!ndmas) { 2756 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2757 "<== nxge_txdma_get_mbox: no channel allocated")); 2758 return (NULL); 2759 } 2760 2761 if (tx_rings->rings == NULL) { 2762 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2763 "<== nxge_txdma_get_mbox: NULL rings pointer")); 2764 return (NULL); 2765 } 2766 2767 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: " 2768 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2769 tx_rings, tx_rings, ndmas)); 2770 2771 for (index = 0; index < ndmas; index++) { 2772 tdc = tx_rings->rings[index]->tdc; 2773 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2774 "==> nxge_txdma_get_mbox: channel %d", tdc)); 2775 if (channel == tdc) { 2776 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2777 "<== nxge_txdma_get_mbox: tdc %d " 2778 "ring $%p", 2779 tdc, tx_rings->rings[index])); 2780 return (p_tx_mbox_t)(tx_mbox_p[index]); 2781 } 2782 } 2783 2784 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox")); 2785 return (NULL); 2786 } 2787 2788 /*ARGSUSED*/ 2789 static nxge_status_t 2790 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 2791 { 2792 npi_handle_t handle; 2793 npi_status_t rs; 2794 uint8_t channel; 2795 p_tx_ring_t *tx_rings; 2796 p_tx_ring_t tx_ring_p; 2797 p_nxge_tx_ring_stats_t tdc_stats; 2798 boolean_t txchan_fatal = B_FALSE; 2799 nxge_status_t status = NXGE_OK; 2800 tdmc_inj_par_err_t par_err; 2801 uint32_t value; 2802 2803 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts")); 2804 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2805 channel = ldvp->channel; 2806 2807 tx_rings = nxgep->tx_rings->rings; 2808 tx_ring_p = tx_rings[index]; 2809 tdc_stats = tx_ring_p->tdc_stats; 2810 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 2811 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 2812 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 2813 if ((rs = npi_txdma_ring_error_get(handle, channel, 2814 &tdc_stats->errlog)) != NPI_SUCCESS) 2815 return (NXGE_ERROR | rs); 2816 } 2817 2818 if (cs.bits.ldw.mbox_err) { 2819 tdc_stats->mbox_err++; 2820 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2821 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 2822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2823 "==> nxge_tx_err_evnts(channel %d): " 2824 "fatal error: mailbox", channel)); 2825 txchan_fatal = B_TRUE; 2826 } 2827 if (cs.bits.ldw.pkt_size_err) { 2828 tdc_stats->pkt_size_err++; 2829 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2830 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2831 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2832 "==> nxge_tx_err_evnts(channel %d): " 2833 "fatal error: pkt_size_err", channel)); 2834 txchan_fatal = B_TRUE; 2835 } 2836 if (cs.bits.ldw.tx_ring_oflow) { 2837 tdc_stats->tx_ring_oflow++; 2838 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2839 
NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 2840 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2841 "==> nxge_tx_err_evnts(channel %d): " 2842 "fatal error: tx_ring_oflow", channel)); 2843 txchan_fatal = B_TRUE; 2844 } 2845 if (cs.bits.ldw.pref_buf_par_err) { 2846 tdc_stats->pre_buf_par_err++; 2847 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2848 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 2849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2850 "==> nxge_tx_err_evnts(channel %d): " 2851 "fatal error: pre_buf_par_err", channel)); 2852 /* Clear error injection source for parity error */ 2853 (void) npi_txdma_inj_par_error_get(handle, &value); 2854 par_err.value = value; 2855 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 2856 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 2857 txchan_fatal = B_TRUE; 2858 } 2859 if (cs.bits.ldw.nack_pref) { 2860 tdc_stats->nack_pref++; 2861 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2862 NXGE_FM_EREPORT_TDMC_NACK_PREF); 2863 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2864 "==> nxge_tx_err_evnts(channel %d): " 2865 "fatal error: nack_pref", channel)); 2866 txchan_fatal = B_TRUE; 2867 } 2868 if (cs.bits.ldw.nack_pkt_rd) { 2869 tdc_stats->nack_pkt_rd++; 2870 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2871 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 2872 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2873 "==> nxge_tx_err_evnts(channel %d): " 2874 "fatal error: nack_pkt_rd", channel)); 2875 txchan_fatal = B_TRUE; 2876 } 2877 if (cs.bits.ldw.conf_part_err) { 2878 tdc_stats->conf_part_err++; 2879 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2880 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 2881 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2882 "==> nxge_tx_err_evnts(channel %d): " 2883 "fatal error: config_partition_err", channel)); 2884 txchan_fatal = B_TRUE; 2885 } 2886 if (cs.bits.ldw.pkt_prt_err) { 2887 tdc_stats->pkt_part_err++; 2888 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2889 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 2890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2891 "==> nxge_tx_err_evnts(channel %d): " 2892 "fatal error: pkt_prt_err", channel)); 2893 txchan_fatal = B_TRUE; 2894 } 2895 2896 /* Clear error injection source in case this is an injected error */ 2897 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 2898 2899 if (txchan_fatal) { 2900 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2901 " nxge_tx_err_evnts: " 2902 " fatal error on channel %d cs 0x%llx\n", 2903 channel, cs.value)); 2904 status = nxge_txdma_fatal_err_recover(nxgep, channel, 2905 tx_ring_p); 2906 if (status == NXGE_OK) { 2907 FM_SERVICE_RESTORED(nxgep); 2908 } 2909 } 2910 2911 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts")); 2912 2913 return (status); 2914 } 2915 2916 static nxge_status_t 2917 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel, 2918 p_tx_ring_t tx_ring_p) 2919 { 2920 npi_handle_t handle; 2921 npi_status_t rs = NPI_SUCCESS; 2922 p_tx_mbox_t tx_mbox_p; 2923 nxge_status_t status = NXGE_OK; 2924 2925 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 2926 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2927 "Recovering from TxDMAChannel#%d error...", channel)); 2928 2929 /* 2930 * Stop the dma channel waits for the stop done. 2931 * If the stop done bit is not set, then create 2932 * an error. 
2933 */ 2934 2935 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2936 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 2937 MUTEX_ENTER(&tx_ring_p->lock); 2938 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 2939 if (rs != NPI_SUCCESS) { 2940 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2941 "==> nxge_txdma_fatal_err_recover (channel %d): " 2942 "stop failed ", channel)); 2943 goto fail; 2944 } 2945 2946 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 2947 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2948 2949 /* 2950 * Reset TXDMA channel 2951 */ 2952 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 2953 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2954 NPI_SUCCESS) { 2955 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2956 "==> nxge_txdma_fatal_err_recover (channel %d)" 2957 " reset channel failed 0x%x", channel, rs)); 2958 goto fail; 2959 } 2960 2961 /* 2962 * Reset the tail (kick) register to 0. 2963 * (Hardware will not reset it. Tx overflow fatal 2964 * error if tail is not set to 0 after reset! 2965 */ 2966 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2967 2968 /* Restart TXDMA channel */ 2969 2970 /* 2971 * Initialize the TXDMA channel specific FZC control 2972 * configurations. These FZC registers are pertaining 2973 * to each TX channel (i.e. logical pages). 2974 */ 2975 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 2976 2977 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 2978 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2979 tx_ring_p, tx_mbox_p); 2980 if (status != NXGE_OK) 2981 goto fail; 2982 2983 /* 2984 * Initialize the event masks. 2985 */ 2986 tx_ring_p->tx_evmask.value = 0; 2987 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 2988 &tx_ring_p->tx_evmask); 2989 if (status != NXGE_OK) 2990 goto fail; 2991 2992 tx_ring_p->wr_index_wrap = B_FALSE; 2993 tx_ring_p->wr_index = 0; 2994 tx_ring_p->rd_index = 0; 2995 2996 /* 2997 * Load TXDMA descriptors, buffers, mailbox, 2998 * initialise the DMA channels and 2999 * enable each DMA channel. 3000 */ 3001 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3002 status = nxge_enable_txdma_channel(nxgep, channel, 3003 tx_ring_p, tx_mbox_p); 3004 MUTEX_EXIT(&tx_ring_p->lock); 3005 if (status != NXGE_OK) 3006 goto fail; 3007 3008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3009 "Recovery Successful, TxDMAChannel#%d Restored", 3010 channel)); 3011 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3012 3013 return (NXGE_OK); 3014 3015 fail: 3016 MUTEX_EXIT(&tx_ring_p->lock); 3017 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3018 "nxge_txdma_fatal_err_recover (channel %d): " 3019 "failed to recover this txdma channel", channel)); 3020 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3021 3022 return (status); 3023 } 3024 3025 nxge_status_t 3026 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3027 { 3028 npi_handle_t handle; 3029 npi_status_t rs = NPI_SUCCESS; 3030 nxge_status_t status = NXGE_OK; 3031 p_tx_ring_t *tx_desc_rings; 3032 p_tx_rings_t tx_rings; 3033 p_tx_ring_t tx_ring_p; 3034 p_tx_mbox_t tx_mbox_p; 3035 int i, ndmas; 3036 uint16_t channel; 3037 3038 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3039 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3040 "Recovering from TxPort error...")); 3041 3042 /* 3043 * Stop the dma channel waits for the stop done. 3044 * If the stop done bit is not set, then create 3045 * an error. 
3046  */
3047 
3048     handle = NXGE_DEV_NPI_HANDLE(nxgep);
3049     NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels..."));
3050 
3051     tx_rings = nxgep->tx_rings;
3052     tx_desc_rings = tx_rings->rings;
3053     ndmas = tx_rings->ndmas;
3054 
3055     for (i = 0; i < ndmas; i++) {
3056         if (tx_desc_rings[i] == NULL) {
3057             continue;
3058         }
3059         tx_ring_p = tx_rings->rings[i];
3060         MUTEX_ENTER(&tx_ring_p->lock);
3061     }
3062 
3063     for (i = 0; i < ndmas; i++) {
3064         if (tx_desc_rings[i] == NULL) {
3065             continue;
3066         }
3067         channel = tx_desc_rings[i]->tdc;
3068         tx_ring_p = tx_rings->rings[i];
3069         rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3070         if (rs != NPI_SUCCESS) {
3071             NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3072                 "==> nxge_tx_port_fatal_err_recover (channel %d): "
3073                 "stop failed ", channel));
3074             goto fail;
3075         }
3076     }
3077 
3078     NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels..."));
3079 
3080     for (i = 0; i < ndmas; i++) {
3081         if (tx_desc_rings[i] == NULL) {
3082             continue;
3083         }
3084         tx_ring_p = tx_rings->rings[i];
3085         (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3086     }
3087 
3088     /*
3089      * Reset TXDMA channels
3090      */
3091     NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels..."));
3092 
3093     for (i = 0; i < ndmas; i++) {
3094         if (tx_desc_rings[i] == NULL) {
3095             continue;
3096         }
3097         channel = tx_desc_rings[i]->tdc;
3098         tx_ring_p = tx_rings->rings[i];
3099         if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET,
3100             channel)) != NPI_SUCCESS) {
3101             NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3102                 "==> nxge_tx_port_fatal_err_recover (channel %d)"
3103                 " reset channel failed 0x%x", channel, rs));
3104             goto fail;
3105         }
3106 
3107         /*
3108          * Reset the tail (kick) register to 0.
3109          * (Hardware will not reset it; leaving the tail non-zero
3110          * after a reset causes a Tx overflow fatal error.)
3111          */
3112 
3113         TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3114 
3115     }
3116 
3117     /*
3118      * Initialize the TXDMA channel specific FZC control
3119      * configurations. These FZC registers pertain
3120      * to each TX channel (i.e. logical pages).
3121      */
3122 
3123     /* Restart TXDMA channels */
3124 
3125     NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels..."));
3126 
3127     for (i = 0; i < ndmas; i++) {
3128         if (tx_desc_rings[i] == NULL) {
3129             continue;
3130         }
3131         channel = tx_desc_rings[i]->tdc;
3132         tx_ring_p = tx_rings->rings[i];
3133         tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3134         status = nxge_init_fzc_txdma_channel(nxgep, channel,
3135             tx_ring_p, tx_mbox_p);
3136         if (status != NXGE_OK)
3137             goto fail;
3138 
3139         /*
3140          * Initialize the event masks.
3141          */
3142         tx_ring_p->tx_evmask.value = 0;
3143         status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3144             &tx_ring_p->tx_evmask);
3145         if (status != NXGE_OK)
3146             goto fail;
3147 
3148         tx_ring_p->wr_index_wrap = B_FALSE;
3149         tx_ring_p->wr_index = 0;
3150         tx_ring_p->rd_index = 0;
3151     }
3152 
3153     /*
3154      * Load TXDMA descriptors, buffers, mailbox,
3155      * initialise the DMA channels and
3156      * enable each DMA channel.
3157  */
3158     NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels..."));
3159 
3160     for (i = 0; i < ndmas; i++) {
3161         if (tx_desc_rings[i] == NULL) {
3162             continue;
3163         }
3164         channel = tx_desc_rings[i]->tdc;
3165         tx_ring_p = tx_rings->rings[i];
3166         tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3167         status = nxge_enable_txdma_channel(nxgep, channel,
3168             tx_ring_p, tx_mbox_p);
3169         if (status != NXGE_OK)
3170             goto fail;
3171     }
3172 
3173     for (i = 0; i < ndmas; i++) {
3174         if (tx_desc_rings[i] == NULL) {
3175             continue;
3176         }
3177         tx_ring_p = tx_rings->rings[i];
3178         MUTEX_EXIT(&tx_ring_p->lock);
3179     }
3180 
3181     NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3182         "Recovery Successful, TxPort Restored"));
3183     NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3184 
3185     return (NXGE_OK);
3186 
3187 fail:
3188     for (i = 0; i < ndmas; i++) {
3189         if (tx_desc_rings[i] == NULL) {
3190             continue;
3191         }
3192         tx_ring_p = tx_rings->rings[i];
3193         MUTEX_EXIT(&tx_ring_p->lock);
3194     }
3195 
3196     NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3197     NXGE_DEBUG_MSG((nxgep, TX_CTL,
3198         "nxge_tx_port_fatal_err_recover: "
3199         "failed to recover the TxPort"));
3200 
3201     return (status);
3202 }
3203 
3204 void
3205 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3206 {
3207     tdmc_intr_dbg_t tdi;
3208     tdmc_inj_par_err_t par_err;
3209     uint32_t value;
3210     npi_handle_t handle;
3211 
3212     switch (err_id) {
3213 
3214     case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3215         handle = NXGE_DEV_NPI_HANDLE(nxgep);
3216         /* Clear error injection source for parity error */
3217         (void) npi_txdma_inj_par_error_get(handle, &value);
3218         par_err.value = value;
3219         par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3220         (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3221 
3222         /* Re-read the register and set the injection bit for this channel */
3223         (void) npi_txdma_inj_par_error_get(handle, &value);
3224         par_err.value = value;
3225         par_err.bits.ldw.inject_parity_error |= (1 << chan);
3226         cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3227             (unsigned long long)par_err.value);
3228         (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3229         break;
3230 
3231     case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3232     case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3233     case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3234     case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3235     case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3236     case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3237     case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3238         TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3239             chan, &tdi.value);
3240         if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3241             tdi.bits.ldw.pref_buf_par_err = 1;
3242         else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3243             tdi.bits.ldw.mbox_err = 1;
3244         else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3245             tdi.bits.ldw.nack_pref = 1;
3246         else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3247             tdi.bits.ldw.nack_pkt_rd = 1;
3248         else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3249             tdi.bits.ldw.pkt_size_err = 1;
3250         else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3251             tdi.bits.ldw.tx_ring_oflow = 1;
3252         else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3253             tdi.bits.ldw.conf_part_err = 1;
3254         else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3255             tdi.bits.ldw.pkt_part_err = 1;
3256         cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3257             (unsigned long long)tdi.value);
3258         TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3259             chan, tdi.value);
3260 
3261         break;
3262 } 3263 } 3264
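/*
 * Error-injection sketch (hypothetical, for illustration only):
 * nxge_txdma_inject_err() is intended to be driven from a debug or FMA
 * test path.  Injecting a mailbox error on channel 0, for example, would
 * look roughly like:
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_MBOX_ERR, 0);
 *
 * The injected bit is written to TDMC_INTR_DBG_REG; the resulting
 * interrupt is then handled by nxge_tx_err_evnts(), which clears the
 * injection source and, for fatal errors, calls
 * nxge_txdma_fatal_err_recover() to reset and restart the channel.
 */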