/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/llc1.h>

uint32_t	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t	nxge_tx_minfree = 32;
uint32_t	nxge_tx_intr_thres = 0;
uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t	nxge_tx_tiny_pack = 1;
uint32_t	nxge_tx_use_bcopy = 1;

extern uint32_t		nxge_tx_ring_size;
extern uint32_t		nxge_bcopy_thresh;
extern uint32_t		nxge_dvma_thresh;
extern uint32_t		nxge_dma_stream_thresh;
extern dma_method_t	nxge_force_dma;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern int nxge_serial_tx(mblk_t *mp, void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t);
static void nxge_unmap_txdma(p_nxge_t);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
static void nxge_txdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));

	status = nxge_map_txdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channels: status 0x%x", status));
		return (status);
	}

	status = nxge_txdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_txdma(nxgep);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channels: status 0x%x", status));

	return (NXGE_OK);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));

	nxge_txdma_hw_stop(nxgep);
	nxge_unmap_txdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channels"));
}

void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t	tsize;

	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}
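/*
 * nxge_setup_dma_common() above carves a sub-block of (entries * size)
 * bytes out of the parent DMA area: the child copy inherits the parent's
 * mapping and records its own length, block count and block size, while
 * the parent's kernel address, DMA cookie address and remaining length
 * are advanced past the carved region, so repeated calls lay out areas
 * back to back.  The transmit control area is split this way later in
 * this file into the descriptor ring followed by the mailbox, roughly
 * (variable names here are illustrative only):
 *
 *	nxge_setup_dma_common(desc_dmap, cntl_dmap,
 *	    tx_ring_p->tx_ring_size, sizeof (tx_desc_t));
 *	nxge_setup_dma_common(mbox_dmap, cntl_dmap, 1,
 *	    sizeof (txdma_mailbox_t));
 */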
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it. A fatal Tx overflow error
	 * results if the tail is not set to 0 after reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
	p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
	uint64_t reg_data)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
	boolean_t l4_cksum, int pkt_len, uint8_t npads,
	p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t		nmp;
	uint64_t		tmp;
	size_t			mblk_len;
	size_t			iph_len;
	size_t			hdrs_size;
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t			*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
286 */ 287 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 288 289 if (fill_len) { 290 NXGE_DEBUG_MSG((NULL, TX_CTL, 291 "==> nxge_fill_tx_hdr: pkt_len %d " 292 "npads %d", pkt_len, npads)); 293 tmp = (uint64_t)pkt_len; 294 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 295 goto fill_tx_header_done; 296 } 297 298 tmp = (uint64_t)npads; 299 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT); 300 301 /* 302 * mp is the original data packet (does not include the 303 * Neptune transmit header). 304 */ 305 nmp = mp; 306 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 307 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 308 "mp $%p b_rptr $%p len %d", 309 mp, nmp->b_rptr, mblk_len)); 310 ip_buf = NULL; 311 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header)); 312 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 313 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 314 "ether type 0x%x", eth_type, hdrp->value)); 315 316 if (eth_type < ETHERMTU) { 317 tmp = 1ull; 318 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 319 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 320 "value 0x%llx", hdrp->value)); 321 if (*(hdrs_buf + sizeof (struct ether_header)) 322 == LLC_SNAP_SAP) { 323 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 324 sizeof (struct ether_header) + 6))); 325 NXGE_DEBUG_MSG((NULL, TX_CTL, 326 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 327 eth_type)); 328 } else { 329 goto fill_tx_header_done; 330 } 331 } else if (eth_type == VLAN_ETHERTYPE) { 332 tmp = 1ull; 333 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 334 335 eth_type = ntohs(((struct ether_vlan_header *) 336 hdrs_buf)->ether_type); 337 is_vlan = B_TRUE; 338 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 339 "value 0x%llx", hdrp->value)); 340 } 341 342 if (!is_vlan) { 343 eth_hdr_size = sizeof (struct ether_header); 344 } else { 345 eth_hdr_size = sizeof (struct ether_vlan_header); 346 } 347 348 switch (eth_type) { 349 case ETHERTYPE_IP: 350 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 351 ip_buf = nmp->b_rptr + eth_hdr_size; 352 mblk_len -= eth_hdr_size; 353 iph_len = ((*ip_buf) & 0x0f); 354 if (mblk_len > (iph_len + sizeof (uint32_t))) { 355 ip_buf = nmp->b_rptr; 356 ip_buf += eth_hdr_size; 357 } else { 358 ip_buf = NULL; 359 } 360 361 } 362 if (ip_buf == NULL) { 363 hdrs_size = 0; 364 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 365 while ((nmp) && (hdrs_size < 366 sizeof (hdrs_buf))) { 367 mblk_len = (size_t)nmp->b_wptr - 368 (size_t)nmp->b_rptr; 369 if (mblk_len >= 370 (sizeof (hdrs_buf) - hdrs_size)) 371 mblk_len = sizeof (hdrs_buf) - 372 hdrs_size; 373 bcopy(nmp->b_rptr, 374 &hdrs_buf[hdrs_size], mblk_len); 375 hdrs_size += mblk_len; 376 nmp = nmp->b_cont; 377 } 378 ip_buf = hdrs_buf; 379 ip_buf += eth_hdr_size; 380 iph_len = ((*ip_buf) & 0x0f); 381 } 382 383 ipproto = ip_buf[9]; 384 385 tmp = (uint64_t)iph_len; 386 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 387 tmp = (uint64_t)(eth_hdr_size >> 1); 388 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 389 390 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 391 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 392 "tmp 0x%x", 393 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 394 ipproto, tmp)); 395 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 396 "value 0x%llx", hdrp->value)); 397 398 break; 399 400 case ETHERTYPE_IPV6: 401 hdrs_size = 0; 402 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 403 while ((nmp) && (hdrs_size < 404 sizeof 
(hdrs_buf))) { 405 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 406 if (mblk_len >= 407 (sizeof (hdrs_buf) - hdrs_size)) 408 mblk_len = sizeof (hdrs_buf) - 409 hdrs_size; 410 bcopy(nmp->b_rptr, 411 &hdrs_buf[hdrs_size], mblk_len); 412 hdrs_size += mblk_len; 413 nmp = nmp->b_cont; 414 } 415 ip_buf = hdrs_buf; 416 ip_buf += eth_hdr_size; 417 418 tmp = 1ull; 419 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 420 421 tmp = (eth_hdr_size >> 1); 422 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 423 424 /* byte 6 is the next header protocol */ 425 ipproto = ip_buf[6]; 426 427 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 428 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 429 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 430 ipproto)); 431 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 432 "value 0x%llx", hdrp->value)); 433 434 break; 435 436 default: 437 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 438 goto fill_tx_header_done; 439 } 440 441 switch (ipproto) { 442 case IPPROTO_TCP: 443 NXGE_DEBUG_MSG((NULL, TX_CTL, 444 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 445 if (l4_cksum) { 446 tmp = 1ull; 447 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 448 NXGE_DEBUG_MSG((NULL, TX_CTL, 449 "==> nxge_tx_pkt_hdr_init: TCP CKSUM" 450 "value 0x%llx", hdrp->value)); 451 } 452 453 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 454 "value 0x%llx", hdrp->value)); 455 break; 456 457 case IPPROTO_UDP: 458 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 459 if (l4_cksum) { 460 tmp = 0x2ull; 461 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 462 } 463 NXGE_DEBUG_MSG((NULL, TX_CTL, 464 "==> nxge_tx_pkt_hdr_init: UDP" 465 "value 0x%llx", hdrp->value)); 466 break; 467 468 default: 469 goto fill_tx_header_done; 470 } 471 472 fill_tx_header_done: 473 NXGE_DEBUG_MSG((NULL, TX_CTL, 474 "==> nxge_fill_tx_hdr: pkt_len %d " 475 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 476 477 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 478 } 479 480 /*ARGSUSED*/ 481 p_mblk_t 482 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 483 { 484 p_mblk_t newmp = NULL; 485 486 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 487 NXGE_DEBUG_MSG((NULL, TX_CTL, 488 "<== nxge_tx_pkt_header_reserve: allocb failed")); 489 return (NULL); 490 } 491 492 NXGE_DEBUG_MSG((NULL, TX_CTL, 493 "==> nxge_tx_pkt_header_reserve: get new mp")); 494 DB_TYPE(newmp) = M_DATA; 495 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 496 linkb(newmp, mp); 497 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 498 499 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 500 "b_rptr $%p b_wptr $%p", 501 newmp->b_rptr, newmp->b_wptr)); 502 503 NXGE_DEBUG_MSG((NULL, TX_CTL, 504 "<== nxge_tx_pkt_header_reserve: use new mp")); 505 506 return (newmp); 507 } 508 509 int 510 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 511 { 512 uint_t nmblks; 513 ssize_t len; 514 uint_t pkt_len; 515 p_mblk_t nmp, bmp, tmp; 516 uint8_t *b_wptr; 517 518 NXGE_DEBUG_MSG((NULL, TX_CTL, 519 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 520 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 521 522 nmp = mp; 523 bmp = mp; 524 nmblks = 0; 525 pkt_len = 0; 526 *tot_xfer_len_p = 0; 527 528 while (nmp) { 529 len = MBLKL(nmp); 530 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 531 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 532 len, pkt_len, nmblks, 533 *tot_xfer_len_p)); 534 535 if (len <= 0) { 536 bmp = 
nmp; 537 nmp = nmp->b_cont; 538 NXGE_DEBUG_MSG((NULL, TX_CTL, 539 "==> nxge_tx_pkt_nmblocks: " 540 "len (0) pkt_len %d nmblks %d", 541 pkt_len, nmblks)); 542 continue; 543 } 544 545 *tot_xfer_len_p += len; 546 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 547 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 548 len, pkt_len, nmblks, 549 *tot_xfer_len_p)); 550 551 if (len < nxge_bcopy_thresh) { 552 NXGE_DEBUG_MSG((NULL, TX_CTL, 553 "==> nxge_tx_pkt_nmblocks: " 554 "len %d (< thresh) pkt_len %d nmblks %d", 555 len, pkt_len, nmblks)); 556 if (pkt_len == 0) 557 nmblks++; 558 pkt_len += len; 559 if (pkt_len >= nxge_bcopy_thresh) { 560 pkt_len = 0; 561 len = 0; 562 nmp = bmp; 563 } 564 } else { 565 NXGE_DEBUG_MSG((NULL, TX_CTL, 566 "==> nxge_tx_pkt_nmblocks: " 567 "len %d (> thresh) pkt_len %d nmblks %d", 568 len, pkt_len, nmblks)); 569 pkt_len = 0; 570 nmblks++; 571 /* 572 * Hardware limits the transfer length to 4K. 573 * If len is more than 4K, we need to break 574 * it up to at most 2 more blocks. 575 */ 576 if (len > TX_MAX_TRANSFER_LENGTH) { 577 uint32_t nsegs; 578 579 NXGE_DEBUG_MSG((NULL, TX_CTL, 580 "==> nxge_tx_pkt_nmblocks: " 581 "len %d pkt_len %d nmblks %d nsegs %d", 582 len, pkt_len, nmblks, nsegs)); 583 nsegs = 1; 584 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 585 ++nsegs; 586 } 587 do { 588 b_wptr = nmp->b_rptr + 589 TX_MAX_TRANSFER_LENGTH; 590 nmp->b_wptr = b_wptr; 591 if ((tmp = dupb(nmp)) == NULL) { 592 return (0); 593 } 594 tmp->b_rptr = b_wptr; 595 tmp->b_wptr = nmp->b_wptr; 596 tmp->b_cont = nmp->b_cont; 597 nmp->b_cont = tmp; 598 nmblks++; 599 if (--nsegs) { 600 nmp = tmp; 601 } 602 } while (nsegs); 603 nmp = tmp; 604 } 605 } 606 607 /* 608 * Hardware limits the transmit gather pointers to 15. 609 */ 610 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 611 TX_MAX_GATHER_POINTERS) { 612 NXGE_DEBUG_MSG((NULL, TX_CTL, 613 "==> nxge_tx_pkt_nmblocks: pull msg - " 614 "len %d pkt_len %d nmblks %d", 615 len, pkt_len, nmblks)); 616 /* Pull all message blocks from b_cont */ 617 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 618 return (0); 619 } 620 freemsg(nmp->b_cont); 621 nmp->b_cont = tmp; 622 pkt_len = 0; 623 } 624 bmp = nmp; 625 nmp = nmp->b_cont; 626 } 627 628 NXGE_DEBUG_MSG((NULL, TX_CTL, 629 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 630 "nmblks %d len %d tot_xfer_len %d", 631 mp->b_rptr, mp->b_wptr, nmblks, 632 MBLKL(mp), *tot_xfer_len_p)); 633 634 return (nmblks); 635 } 636 637 boolean_t 638 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 639 { 640 boolean_t status = B_TRUE; 641 p_nxge_dma_common_t tx_desc_dma_p; 642 nxge_dma_common_t desc_area; 643 p_tx_desc_t tx_desc_ring_vp; 644 p_tx_desc_t tx_desc_p; 645 p_tx_desc_t tx_desc_pp; 646 tx_desc_t r_tx_desc; 647 p_tx_msg_t tx_msg_ring; 648 p_tx_msg_t tx_msg_p; 649 npi_handle_t handle; 650 tx_ring_hdl_t tx_head; 651 uint32_t pkt_len; 652 uint_t tx_rd_index; 653 uint16_t head_index, tail_index; 654 uint8_t tdc; 655 boolean_t head_wrap, tail_wrap; 656 p_nxge_tx_ring_stats_t tdc_stats; 657 int rc; 658 659 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 660 661 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 662 (nmblks != 0)); 663 NXGE_DEBUG_MSG((nxgep, TX_CTL, 664 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 665 tx_ring_p->descs_pending, nxge_reclaim_pending, 666 nmblks)); 667 if (!status) { 668 tx_desc_dma_p = &tx_ring_p->tdc_desc; 669 desc_area = tx_ring_p->tdc_desc; 670 handle = NXGE_DEV_NPI_HANDLE(nxgep); 671 tx_desc_ring_vp = 
tx_desc_dma_p->kaddrp; 672 tx_desc_ring_vp = 673 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 674 tx_rd_index = tx_ring_p->rd_index; 675 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 676 tx_msg_ring = tx_ring_p->tx_msg_ring; 677 tx_msg_p = &tx_msg_ring[tx_rd_index]; 678 tdc = tx_ring_p->tdc; 679 tdc_stats = tx_ring_p->tdc_stats; 680 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 681 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 682 } 683 684 tail_index = tx_ring_p->wr_index; 685 tail_wrap = tx_ring_p->wr_index_wrap; 686 687 NXGE_DEBUG_MSG((nxgep, TX_CTL, 688 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 689 "tail_index %d tail_wrap %d " 690 "tx_desc_p $%p ($%p) ", 691 tdc, tx_rd_index, tail_index, tail_wrap, 692 tx_desc_p, (*(uint64_t *)tx_desc_p))); 693 /* 694 * Read the hardware maintained transmit head 695 * and wrap around bit. 696 */ 697 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 698 head_index = tx_head.bits.ldw.head; 699 head_wrap = tx_head.bits.ldw.wrap; 700 NXGE_DEBUG_MSG((nxgep, TX_CTL, 701 "==> nxge_txdma_reclaim: " 702 "tx_rd_index %d tail %d tail_wrap %d " 703 "head %d wrap %d", 704 tx_rd_index, tail_index, tail_wrap, 705 head_index, head_wrap)); 706 707 if (head_index == tail_index) { 708 if (TXDMA_RING_EMPTY(head_index, head_wrap, 709 tail_index, tail_wrap) && 710 (head_index == tx_rd_index)) { 711 NXGE_DEBUG_MSG((nxgep, TX_CTL, 712 "==> nxge_txdma_reclaim: EMPTY")); 713 return (B_TRUE); 714 } 715 716 NXGE_DEBUG_MSG((nxgep, TX_CTL, 717 "==> nxge_txdma_reclaim: Checking " 718 "if ring full")); 719 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 720 tail_wrap)) { 721 NXGE_DEBUG_MSG((nxgep, TX_CTL, 722 "==> nxge_txdma_reclaim: full")); 723 return (B_FALSE); 724 } 725 } 726 727 NXGE_DEBUG_MSG((nxgep, TX_CTL, 728 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 729 730 tx_desc_pp = &r_tx_desc; 731 while ((tx_rd_index != head_index) && 732 (tx_ring_p->descs_pending != 0)) { 733 734 NXGE_DEBUG_MSG((nxgep, TX_CTL, 735 "==> nxge_txdma_reclaim: Checking if pending")); 736 737 NXGE_DEBUG_MSG((nxgep, TX_CTL, 738 "==> nxge_txdma_reclaim: " 739 "descs_pending %d ", 740 tx_ring_p->descs_pending)); 741 742 NXGE_DEBUG_MSG((nxgep, TX_CTL, 743 "==> nxge_txdma_reclaim: " 744 "(tx_rd_index %d head_index %d " 745 "(tx_desc_p $%p)", 746 tx_rd_index, head_index, 747 tx_desc_p)); 748 749 tx_desc_pp->value = tx_desc_p->value; 750 NXGE_DEBUG_MSG((nxgep, TX_CTL, 751 "==> nxge_txdma_reclaim: " 752 "(tx_rd_index %d head_index %d " 753 "tx_desc_p $%p (desc value 0x%llx) ", 754 tx_rd_index, head_index, 755 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 756 757 NXGE_DEBUG_MSG((nxgep, TX_CTL, 758 "==> nxge_txdma_reclaim: dump desc:")); 759 760 pkt_len = tx_desc_pp->bits.hdw.tr_len; 761 tdc_stats->obytes += pkt_len; 762 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 763 NXGE_DEBUG_MSG((nxgep, TX_CTL, 764 "==> nxge_txdma_reclaim: pkt_len %d " 765 "tdc channel %d opackets %d", 766 pkt_len, 767 tdc, 768 tdc_stats->opackets)); 769 770 if (tx_msg_p->flags.dma_type == USE_DVMA) { 771 NXGE_DEBUG_MSG((nxgep, TX_CTL, 772 "tx_desc_p = $%p " 773 "tx_desc_pp = $%p " 774 "index = %d", 775 tx_desc_p, 776 tx_desc_pp, 777 tx_ring_p->rd_index)); 778 (void) dvma_unload(tx_msg_p->dvma_handle, 779 0, -1); 780 tx_msg_p->dvma_handle = NULL; 781 if (tx_ring_p->dvma_wr_index == 782 tx_ring_p->dvma_wrap_mask) { 783 tx_ring_p->dvma_wr_index = 0; 784 } else { 785 tx_ring_p->dvma_wr_index++; 786 } 787 tx_ring_p->dvma_pending--; 788 } else if (tx_msg_p->flags.dma_type == 789 USE_DMA) { 790 
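				/*
				 * USE_DMA: the message buffer was bound to a
				 * DDI DMA handle on the transmit path, so the
				 * handle is unbound here.  The USE_DVMA case
				 * above instead unloads its fast DVMA mapping
				 * and advances the DVMA write index.
				 */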
NXGE_DEBUG_MSG((nxgep, TX_CTL, 791 "==> nxge_txdma_reclaim: " 792 "USE DMA")); 793 if (rc = ddi_dma_unbind_handle 794 (tx_msg_p->dma_handle)) { 795 cmn_err(CE_WARN, "!nxge_reclaim: " 796 "ddi_dma_unbind_handle " 797 "failed. status %d", rc); 798 } 799 } 800 NXGE_DEBUG_MSG((nxgep, TX_CTL, 801 "==> nxge_txdma_reclaim: count packets")); 802 /* 803 * count a chained packet only once. 804 */ 805 if (tx_msg_p->tx_message != NULL) { 806 freemsg(tx_msg_p->tx_message); 807 tx_msg_p->tx_message = NULL; 808 } 809 810 tx_msg_p->flags.dma_type = USE_NONE; 811 tx_rd_index = tx_ring_p->rd_index; 812 tx_rd_index = (tx_rd_index + 1) & 813 tx_ring_p->tx_wrap_mask; 814 tx_ring_p->rd_index = tx_rd_index; 815 tx_ring_p->descs_pending--; 816 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 817 tx_msg_p = &tx_msg_ring[tx_rd_index]; 818 } 819 820 status = (nmblks <= (tx_ring_p->tx_ring_size - 821 tx_ring_p->descs_pending - 822 TX_FULL_MARK)); 823 if (status) { 824 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 825 } 826 } else { 827 status = (nmblks <= 828 (tx_ring_p->tx_ring_size - 829 tx_ring_p->descs_pending - 830 TX_FULL_MARK)); 831 } 832 833 NXGE_DEBUG_MSG((nxgep, TX_CTL, 834 "<== nxge_txdma_reclaim status = 0x%08x", status)); 835 836 return (status); 837 } 838 839 uint_t 840 nxge_tx_intr(void *arg1, void *arg2) 841 { 842 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 843 p_nxge_t nxgep = (p_nxge_t)arg2; 844 p_nxge_ldg_t ldgp; 845 uint8_t channel; 846 uint32_t vindex; 847 npi_handle_t handle; 848 tx_cs_t cs; 849 p_tx_ring_t *tx_rings; 850 p_tx_ring_t tx_ring_p; 851 npi_status_t rs = NPI_SUCCESS; 852 uint_t serviced = DDI_INTR_UNCLAIMED; 853 nxge_status_t status = NXGE_OK; 854 855 if (ldvp == NULL) { 856 NXGE_DEBUG_MSG((NULL, INT_CTL, 857 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 858 nxgep, ldvp)); 859 return (DDI_INTR_UNCLAIMED); 860 } 861 862 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 863 nxgep = ldvp->nxgep; 864 } 865 NXGE_DEBUG_MSG((nxgep, INT_CTL, 866 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 867 nxgep, ldvp)); 868 /* 869 * This interrupt handler is for a specific 870 * transmit dma channel. 871 */ 872 handle = NXGE_DEV_NPI_HANDLE(nxgep); 873 /* Get the control and status for this channel. */ 874 channel = ldvp->channel; 875 ldgp = ldvp->ldgp; 876 NXGE_DEBUG_MSG((nxgep, INT_CTL, 877 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 878 "channel %d", 879 nxgep, ldvp, channel)); 880 881 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 882 vindex = ldvp->vdma_index; 883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 884 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 885 channel, vindex, rs)); 886 if (!rs && cs.bits.ldw.mk) { 887 NXGE_DEBUG_MSG((nxgep, INT_CTL, 888 "==> nxge_tx_intr:channel %d ring index %d " 889 "status 0x%08x (mk bit set)", 890 channel, vindex, rs)); 891 tx_rings = nxgep->tx_rings->rings; 892 tx_ring_p = tx_rings[vindex]; 893 NXGE_DEBUG_MSG((nxgep, INT_CTL, 894 "==> nxge_tx_intr:channel %d ring index %d " 895 "status 0x%08x (mk bit set, calling reclaim)", 896 channel, vindex, rs)); 897 898 MUTEX_ENTER(&tx_ring_p->lock); 899 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 900 MUTEX_EXIT(&tx_ring_p->lock); 901 mac_tx_update(nxgep->mach); 902 } 903 904 /* 905 * Process other transmit control and status. 906 * Check the ldv state. 907 */ 908 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 909 /* 910 * Rearm this logical group if this is a single device 911 * group. 
912 */ 913 if (ldgp->nldvs == 1) { 914 NXGE_DEBUG_MSG((nxgep, INT_CTL, 915 "==> nxge_tx_intr: rearm")); 916 if (status == NXGE_OK) { 917 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 918 B_TRUE, ldgp->ldg_timer); 919 } 920 } 921 922 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 923 serviced = DDI_INTR_CLAIMED; 924 return (serviced); 925 } 926 927 void 928 nxge_txdma_stop(p_nxge_t nxgep) 929 { 930 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 931 932 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 933 934 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 935 } 936 937 void 938 nxge_txdma_stop_start(p_nxge_t nxgep) 939 { 940 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 941 942 (void) nxge_txdma_stop(nxgep); 943 944 (void) nxge_fixup_txdma_rings(nxgep); 945 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 946 (void) nxge_tx_mac_enable(nxgep); 947 (void) nxge_txdma_hw_kick(nxgep); 948 949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 950 } 951 952 nxge_status_t 953 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 954 { 955 int i, ndmas; 956 uint16_t channel; 957 p_tx_rings_t tx_rings; 958 p_tx_ring_t *tx_desc_rings; 959 npi_handle_t handle; 960 npi_status_t rs = NPI_SUCCESS; 961 nxge_status_t status = NXGE_OK; 962 963 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 964 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 965 966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 967 NXGE_DEBUG_MSG((nxgep, TX_CTL, 968 "<== nxge_txdma_mode: not initialized")); 969 return (NXGE_ERROR); 970 } 971 972 tx_rings = nxgep->tx_rings; 973 if (tx_rings == NULL) { 974 NXGE_DEBUG_MSG((nxgep, TX_CTL, 975 "<== nxge_txdma_hw_mode: NULL global ring pointer")); 976 return (NXGE_ERROR); 977 } 978 979 tx_desc_rings = tx_rings->rings; 980 if (tx_desc_rings == NULL) { 981 NXGE_DEBUG_MSG((nxgep, TX_CTL, 982 "<== nxge_txdma_hw_mode: NULL rings pointer")); 983 return (NXGE_ERROR); 984 } 985 986 ndmas = tx_rings->ndmas; 987 if (!ndmas) { 988 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 989 "<== nxge_txdma_hw_mode: no dma channel allocated")); 990 return (NXGE_ERROR); 991 } 992 993 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: " 994 "tx_rings $%p tx_desc_rings $%p ndmas %d", 995 tx_rings, tx_desc_rings, ndmas)); 996 997 handle = NXGE_DEV_NPI_HANDLE(nxgep); 998 for (i = 0; i < ndmas; i++) { 999 if (tx_desc_rings[i] == NULL) { 1000 continue; 1001 } 1002 channel = tx_desc_rings[i]->tdc; 1003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1004 "==> nxge_txdma_hw_mode: channel %d", channel)); 1005 if (enable) { 1006 rs = npi_txdma_channel_enable(handle, channel); 1007 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1008 "==> nxge_txdma_hw_mode: channel %d (enable) " 1009 "rs 0x%x", channel, rs)); 1010 } else { 1011 /* 1012 * Stop the dma channel and waits for the stop done. 1013 * If the stop done bit is not set, then force 1014 * an error so TXC will stop. 1015 * All channels bound to this port need to be stopped 1016 * and reset after injecting an interrupt error. 
1017 */ 1018 rs = npi_txdma_channel_disable(handle, channel); 1019 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1020 "==> nxge_txdma_hw_mode: channel %d (disable) " 1021 "rs 0x%x", channel, rs)); 1022 { 1023 tdmc_intr_dbg_t intr_dbg; 1024 1025 if (rs != NPI_SUCCESS) { 1026 /* Inject any error */ 1027 intr_dbg.value = 0; 1028 intr_dbg.bits.ldw.nack_pref = 1; 1029 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1030 "==> nxge_txdma_hw_mode: " 1031 "channel %d (stop failed 0x%x) " 1032 "(inject err)", rs, channel)); 1033 (void) npi_txdma_inj_int_error_set( 1034 handle, channel, &intr_dbg); 1035 rs = npi_txdma_channel_disable(handle, 1036 channel); 1037 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1038 "==> nxge_txdma_hw_mode: " 1039 "channel %d (stop again 0x%x) " 1040 "(after inject err)", 1041 rs, channel)); 1042 } 1043 } 1044 } 1045 } 1046 1047 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1048 1049 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1050 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1051 1052 return (status); 1053 } 1054 1055 void 1056 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1057 { 1058 npi_handle_t handle; 1059 1060 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1061 "==> nxge_txdma_enable_channel: channel %d", channel)); 1062 1063 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1064 /* enable the transmit dma channels */ 1065 (void) npi_txdma_channel_enable(handle, channel); 1066 1067 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1068 } 1069 1070 void 1071 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1072 { 1073 npi_handle_t handle; 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1076 "==> nxge_txdma_disable_channel: channel %d", channel)); 1077 1078 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1079 /* stop the transmit dma channels */ 1080 (void) npi_txdma_channel_disable(handle, channel); 1081 1082 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1083 } 1084 1085 int 1086 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1087 { 1088 npi_handle_t handle; 1089 tdmc_intr_dbg_t intr_dbg; 1090 int status; 1091 npi_status_t rs = NPI_SUCCESS; 1092 1093 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1094 /* 1095 * Stop the dma channel waits for the stop done. 1096 * If the stop done bit is not set, then create 1097 * an error. 1098 */ 1099 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1100 rs = npi_txdma_channel_disable(handle, channel); 1101 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1102 if (status == NXGE_OK) { 1103 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1104 "<== nxge_txdma_stop_inj_err (channel %d): " 1105 "stopped OK", channel)); 1106 return (status); 1107 } 1108 1109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1110 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1111 "injecting error", channel, rs)); 1112 /* Inject any error */ 1113 intr_dbg.value = 0; 1114 intr_dbg.bits.ldw.nack_pref = 1; 1115 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1116 1117 /* Stop done bit will be set as a result of error injection */ 1118 rs = npi_txdma_channel_disable(handle, channel); 1119 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1120 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1121 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1122 "<== nxge_txdma_stop_inj_err (channel %d): " 1123 "stopped OK ", channel)); 1124 return (status); 1125 } 1126 1127 #if defined(NXGE_DEBUG) 1128 nxge_txdma_regs_dump_channels(nxgep); 1129 #endif 1130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1131 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1132 " (injected error but still not stopped)", channel, rs)); 1133 1134 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1135 return (status); 1136 } 1137 1138 void 1139 nxge_hw_start_tx(p_nxge_t nxgep) 1140 { 1141 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx")); 1142 1143 (void) nxge_txdma_hw_start(nxgep); 1144 (void) nxge_tx_mac_enable(nxgep); 1145 1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx")); 1147 } 1148 1149 /*ARGSUSED*/ 1150 void 1151 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1152 { 1153 int index, ndmas; 1154 uint16_t channel; 1155 p_tx_rings_t tx_rings; 1156 1157 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1158 1159 /* 1160 * For each transmit channel, reclaim each descriptor and 1161 * free buffers. 1162 */ 1163 tx_rings = nxgep->tx_rings; 1164 if (tx_rings == NULL) { 1165 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1166 "<== nxge_fixup_txdma_rings: NULL ring pointer")); 1167 return; 1168 } 1169 1170 ndmas = tx_rings->ndmas; 1171 if (!ndmas) { 1172 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1173 "<== nxge_fixup_txdma_rings: no channel allocated")); 1174 return; 1175 } 1176 1177 if (tx_rings->rings == NULL) { 1178 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1179 "<== nxge_fixup_txdma_rings: NULL rings pointer")); 1180 return; 1181 } 1182 1183 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: " 1184 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1185 tx_rings, tx_rings->rings, ndmas)); 1186 1187 for (index = 0; index < ndmas; index++) { 1188 channel = tx_rings->rings[index]->tdc; 1189 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1190 "==> nxge_fixup_txdma_rings: channel %d", channel)); 1191 1192 nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index], 1193 channel); 1194 } 1195 1196 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1197 } 1198 1199 /*ARGSUSED*/ 1200 void 1201 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1202 { 1203 p_tx_ring_t ring_p; 1204 1205 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1206 ring_p = nxge_txdma_get_ring(nxgep, channel); 1207 if (ring_p == NULL) { 1208 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1209 return; 1210 } 1211 1212 if (ring_p->tdc != channel) { 1213 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1214 "<== nxge_txdma_fix_channel: channel not matched " 1215 "ring tdc %d passed channel", 1216 ring_p->tdc, channel)); 1217 return; 1218 } 1219 1220 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1221 1222 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1223 } 1224 1225 /*ARGSUSED*/ 1226 void 1227 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1228 { 1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1230 1231 if (ring_p == NULL) { 1232 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1233 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1234 return; 1235 } 1236 1237 if (ring_p->tdc != channel) { 1238 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1239 "<== nxge_txdma_fixup_channel: channel not matched " 1240 "ring tdc %d passed channel", 1241 ring_p->tdc, channel)); 1242 return; 1243 } 1244 1245 MUTEX_ENTER(&ring_p->lock); 1246 (void) 
nxge_txdma_reclaim(nxgep, ring_p, 0); 1247 ring_p->rd_index = 0; 1248 ring_p->wr_index = 0; 1249 ring_p->ring_head.value = 0; 1250 ring_p->ring_kick_tail.value = 0; 1251 ring_p->descs_pending = 0; 1252 MUTEX_EXIT(&ring_p->lock); 1253 1254 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1255 } 1256 1257 /*ARGSUSED*/ 1258 void 1259 nxge_txdma_hw_kick(p_nxge_t nxgep) 1260 { 1261 int index, ndmas; 1262 uint16_t channel; 1263 p_tx_rings_t tx_rings; 1264 1265 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1266 1267 tx_rings = nxgep->tx_rings; 1268 if (tx_rings == NULL) { 1269 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1270 "<== nxge_txdma_hw_kick: NULL ring pointer")); 1271 return; 1272 } 1273 1274 ndmas = tx_rings->ndmas; 1275 if (!ndmas) { 1276 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1277 "<== nxge_txdma_hw_kick: no channel allocated")); 1278 return; 1279 } 1280 1281 if (tx_rings->rings == NULL) { 1282 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1283 "<== nxge_txdma_hw_kick: NULL rings pointer")); 1284 return; 1285 } 1286 1287 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: " 1288 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1289 tx_rings, tx_rings->rings, ndmas)); 1290 1291 for (index = 0; index < ndmas; index++) { 1292 channel = tx_rings->rings[index]->tdc; 1293 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1294 "==> nxge_txdma_hw_kick: channel %d", channel)); 1295 nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index], 1296 channel); 1297 } 1298 1299 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1300 } 1301 1302 /*ARGSUSED*/ 1303 void 1304 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1305 { 1306 p_tx_ring_t ring_p; 1307 1308 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1309 1310 ring_p = nxge_txdma_get_ring(nxgep, channel); 1311 if (ring_p == NULL) { 1312 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1313 " nxge_txdma_kick_channel")); 1314 return; 1315 } 1316 1317 if (ring_p->tdc != channel) { 1318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1319 "<== nxge_txdma_kick_channel: channel not matched " 1320 "ring tdc %d passed channel", 1321 ring_p->tdc, channel)); 1322 return; 1323 } 1324 1325 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1326 1327 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1328 } 1329 1330 /*ARGSUSED*/ 1331 void 1332 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1333 { 1334 1335 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1336 1337 if (ring_p == NULL) { 1338 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1339 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1340 return; 1341 } 1342 1343 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1344 } 1345 1346 /*ARGSUSED*/ 1347 void 1348 nxge_check_tx_hang(p_nxge_t nxgep) 1349 { 1350 1351 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1352 1353 /* 1354 * Needs inputs from hardware for regs: 1355 * head index had not moved since last timeout. 1356 * packets not transmitted or stuffed registers. 
1357 */ 1358 if (nxge_txdma_hung(nxgep)) { 1359 nxge_fixup_hung_txdma_rings(nxgep); 1360 } 1361 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1362 } 1363 1364 int 1365 nxge_txdma_hung(p_nxge_t nxgep) 1366 { 1367 int index, ndmas; 1368 uint16_t channel; 1369 p_tx_rings_t tx_rings; 1370 p_tx_ring_t tx_ring_p; 1371 1372 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1373 tx_rings = nxgep->tx_rings; 1374 if (tx_rings == NULL) { 1375 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1376 "<== nxge_txdma_hung: NULL ring pointer")); 1377 return (B_FALSE); 1378 } 1379 1380 ndmas = tx_rings->ndmas; 1381 if (!ndmas) { 1382 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1383 "<== nxge_txdma_hung: no channel " 1384 "allocated")); 1385 return (B_FALSE); 1386 } 1387 1388 if (tx_rings->rings == NULL) { 1389 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1390 "<== nxge_txdma_hung: NULL rings pointer")); 1391 return (B_FALSE); 1392 } 1393 1394 for (index = 0; index < ndmas; index++) { 1395 channel = tx_rings->rings[index]->tdc; 1396 tx_ring_p = tx_rings->rings[index]; 1397 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1398 "==> nxge_txdma_hung: channel %d", channel)); 1399 if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) { 1400 return (B_TRUE); 1401 } 1402 } 1403 1404 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1405 1406 return (B_FALSE); 1407 } 1408 1409 int 1410 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1411 { 1412 uint16_t head_index, tail_index; 1413 boolean_t head_wrap, tail_wrap; 1414 npi_handle_t handle; 1415 tx_ring_hdl_t tx_head; 1416 uint_t tx_rd_index; 1417 1418 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1419 1420 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1421 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1422 "==> nxge_txdma_channel_hung: channel %d", channel)); 1423 MUTEX_ENTER(&tx_ring_p->lock); 1424 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1425 1426 tail_index = tx_ring_p->wr_index; 1427 tail_wrap = tx_ring_p->wr_index_wrap; 1428 tx_rd_index = tx_ring_p->rd_index; 1429 MUTEX_EXIT(&tx_ring_p->lock); 1430 1431 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1432 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1433 "tail_index %d tail_wrap %d ", 1434 channel, tx_rd_index, tail_index, tail_wrap)); 1435 /* 1436 * Read the hardware maintained transmit head 1437 * and wrap around bit. 
1438 */ 1439 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1440 head_index = tx_head.bits.ldw.head; 1441 head_wrap = tx_head.bits.ldw.wrap; 1442 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1443 "==> nxge_txdma_channel_hung: " 1444 "tx_rd_index %d tail %d tail_wrap %d " 1445 "head %d wrap %d", 1446 tx_rd_index, tail_index, tail_wrap, 1447 head_index, head_wrap)); 1448 1449 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1450 tail_index, tail_wrap) && 1451 (head_index == tx_rd_index)) { 1452 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1453 "==> nxge_txdma_channel_hung: EMPTY")); 1454 return (B_FALSE); 1455 } 1456 1457 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1458 "==> nxge_txdma_channel_hung: Checking if ring full")); 1459 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1460 tail_wrap)) { 1461 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1462 "==> nxge_txdma_channel_hung: full")); 1463 return (B_TRUE); 1464 } 1465 1466 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1467 1468 return (B_FALSE); 1469 } 1470 1471 /*ARGSUSED*/ 1472 void 1473 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1474 { 1475 int index, ndmas; 1476 uint16_t channel; 1477 p_tx_rings_t tx_rings; 1478 1479 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1480 tx_rings = nxgep->tx_rings; 1481 if (tx_rings == NULL) { 1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1483 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer")); 1484 return; 1485 } 1486 1487 ndmas = tx_rings->ndmas; 1488 if (!ndmas) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1490 "<== nxge_fixup_hung_txdma_rings: no channel " 1491 "allocated")); 1492 return; 1493 } 1494 1495 if (tx_rings->rings == NULL) { 1496 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1497 "<== nxge_fixup_hung_txdma_rings: NULL rings pointer")); 1498 return; 1499 } 1500 1501 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: " 1502 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1503 tx_rings, tx_rings->rings, ndmas)); 1504 1505 for (index = 0; index < ndmas; index++) { 1506 channel = tx_rings->rings[index]->tdc; 1507 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1508 "==> nxge_fixup_hung_txdma_rings: channel %d", 1509 channel)); 1510 1511 nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index], 1512 channel); 1513 } 1514 1515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1516 } 1517 1518 /*ARGSUSED*/ 1519 void 1520 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1521 { 1522 p_tx_ring_t ring_p; 1523 1524 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1525 ring_p = nxge_txdma_get_ring(nxgep, channel); 1526 if (ring_p == NULL) { 1527 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1528 "<== nxge_txdma_fix_hung_channel")); 1529 return; 1530 } 1531 1532 if (ring_p->tdc != channel) { 1533 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1534 "<== nxge_txdma_fix_hung_channel: channel not matched " 1535 "ring tdc %d passed channel", 1536 ring_p->tdc, channel)); 1537 return; 1538 } 1539 1540 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1541 1542 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1543 } 1544 1545 /*ARGSUSED*/ 1546 void 1547 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1548 uint16_t channel) 1549 { 1550 npi_handle_t handle; 1551 tdmc_intr_dbg_t intr_dbg; 1552 int status = NXGE_OK; 1553 1554 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1555 1556 if (ring_p == NULL) { 1557 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1558 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1559 return; 1560 } 1561 1562 if (ring_p->tdc != channel) { 1563 
NXGE_DEBUG_MSG((nxgep, TX_CTL, 1564 "<== nxge_txdma_fixup_hung_channel: channel " 1565 "not matched " 1566 "ring tdc %d passed channel", 1567 ring_p->tdc, channel)); 1568 return; 1569 } 1570 1571 /* Reclaim descriptors */ 1572 MUTEX_ENTER(&ring_p->lock); 1573 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1574 MUTEX_EXIT(&ring_p->lock); 1575 1576 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1577 /* 1578 * Stop the dma channel waits for the stop done. 1579 * If the stop done bit is not set, then force 1580 * an error. 1581 */ 1582 status = npi_txdma_channel_disable(handle, channel); 1583 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1584 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1585 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1586 "ring tdc %d passed channel %d", 1587 ring_p->tdc, channel)); 1588 return; 1589 } 1590 1591 /* Inject any error */ 1592 intr_dbg.value = 0; 1593 intr_dbg.bits.ldw.nack_pref = 1; 1594 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1595 1596 /* Stop done bit will be set as a result of error injection */ 1597 status = npi_txdma_channel_disable(handle, channel); 1598 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1599 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1600 "<== nxge_txdma_fixup_hung_channel: stopped again" 1601 "ring tdc %d passed channel", 1602 ring_p->tdc, channel)); 1603 return; 1604 } 1605 1606 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1607 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 1608 "ring tdc %d passed channel", 1609 ring_p->tdc, channel)); 1610 1611 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1612 } 1613 1614 /*ARGSUSED*/ 1615 void 1616 nxge_reclaim_rings(p_nxge_t nxgep) 1617 { 1618 int index, ndmas; 1619 uint16_t channel; 1620 p_tx_rings_t tx_rings; 1621 p_tx_ring_t tx_ring_p; 1622 1623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_ring")); 1624 tx_rings = nxgep->tx_rings; 1625 if (tx_rings == NULL) { 1626 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1627 "<== nxge_reclain_rimgs: NULL ring pointer")); 1628 return; 1629 } 1630 1631 ndmas = tx_rings->ndmas; 1632 if (!ndmas) { 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1634 "<== nxge_reclain_rimgs: no channel " 1635 "allocated")); 1636 return; 1637 } 1638 1639 if (tx_rings->rings == NULL) { 1640 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1641 "<== nxge_reclain_rimgs: NULL rings pointer")); 1642 return; 1643 } 1644 1645 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclain_rimgs: " 1646 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1647 tx_rings, tx_rings->rings, ndmas)); 1648 1649 for (index = 0; index < ndmas; index++) { 1650 channel = tx_rings->rings[index]->tdc; 1651 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1652 "==> reclain_rimgs: channel %d", 1653 channel)); 1654 tx_ring_p = tx_rings->rings[index]; 1655 MUTEX_ENTER(&tx_ring_p->lock); 1656 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel); 1657 MUTEX_EXIT(&tx_ring_p->lock); 1658 } 1659 1660 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 1661 } 1662 1663 void 1664 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 1665 { 1666 int index, ndmas; 1667 uint16_t channel; 1668 p_tx_rings_t tx_rings; 1669 npi_handle_t handle; 1670 1671 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_txdma_regs_dump_channels")); 1672 1673 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1674 (void) npi_txdma_dump_fzc_regs(handle); 1675 1676 tx_rings = nxgep->tx_rings; 1677 if (tx_rings == NULL) { 1678 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1679 "<== nxge_txdma_regs_dump_channels: NULL ring")); 1680 return; 1681 } 1682 1683 ndmas = tx_rings->ndmas; 1684 if (!ndmas) { 1685 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1686 "<== 
nxge_txdma_regs_dump_channels: " 1687 "no channel allocated")); 1688 return; 1689 } 1690 1691 if (tx_rings->rings == NULL) { 1692 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1693 "<== nxge_txdma_regs_dump_channels: NULL rings")); 1694 return; 1695 } 1696 1697 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: " 1698 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1699 tx_rings, tx_rings->rings, ndmas)); 1700 1701 for (index = 0; index < ndmas; index++) { 1702 channel = tx_rings->rings[index]->tdc; 1703 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1704 "==> nxge_txdma_regs_dump_channels: channel %d", 1705 channel)); 1706 (void) npi_txdma_dump_tdc_regs(handle, channel); 1707 } 1708 1709 /* Dump TXC registers */ 1710 (void) npi_txc_dump_fzc_regs(handle); 1711 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 1712 1713 for (index = 0; index < ndmas; index++) { 1714 channel = tx_rings->rings[index]->tdc; 1715 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1716 "==> nxge_txdma_regs_dump_channels: channel %d", 1717 channel)); 1718 (void) npi_txc_dump_tdc_fzc_regs(handle, channel); 1719 } 1720 1721 for (index = 0; index < ndmas; index++) { 1722 channel = tx_rings->rings[index]->tdc; 1723 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1724 "==> nxge_txdma_regs_dump_channels: channel %d", 1725 channel)); 1726 nxge_txdma_regs_dump(nxgep, channel); 1727 } 1728 1729 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 1730 1731 } 1732 1733 void 1734 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 1735 { 1736 npi_handle_t handle; 1737 tx_ring_hdl_t hdl; 1738 tx_ring_kick_t kick; 1739 tx_cs_t cs; 1740 txc_control_t control; 1741 uint32_t bitmap = 0; 1742 uint32_t burst = 0; 1743 uint32_t bytes = 0; 1744 dma_log_page_t cfg; 1745 1746 printf("\n\tfunc # %d tdc %d ", 1747 nxgep->function_num, channel); 1748 cfg.page_num = 0; 1749 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1750 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1751 printf("\n\tlog page func %d valid page 0 %d", 1752 cfg.func_num, cfg.valid); 1753 cfg.page_num = 1; 1754 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1755 printf("\n\tlog page func %d valid page 1 %d", 1756 cfg.func_num, cfg.valid); 1757 1758 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 1759 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 1760 printf("\n\thead value is 0x%0llx", 1761 (long long)hdl.value); 1762 printf("\n\thead index %d", hdl.bits.ldw.head); 1763 printf("\n\tkick value is 0x%0llx", 1764 (long long)kick.value); 1765 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 1766 1767 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 1768 printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 1769 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 1770 1771 (void) npi_txc_control(handle, OP_GET, &control); 1772 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 1773 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 1774 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 1775 1776 printf("\n\tTXC port control 0x%0llx", 1777 (long long)control.value); 1778 printf("\n\tTXC port bitmap 0x%x", bitmap); 1779 printf("\n\tTXC max burst %d", burst); 1780 printf("\n\tTXC bytes xmt %d\n", bytes); 1781 1782 { 1783 ipp_status_t status; 1784 1785 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 1786 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 1787 } 1788 } 1789 1790 /* 1791 * Static functions start here. 
1792 */ 1793 static nxge_status_t 1794 nxge_map_txdma(p_nxge_t nxgep) 1795 { 1796 int i, ndmas; 1797 uint16_t channel; 1798 p_tx_rings_t tx_rings; 1799 p_tx_ring_t *tx_desc_rings; 1800 p_tx_mbox_areas_t tx_mbox_areas_p; 1801 p_tx_mbox_t *tx_mbox_p; 1802 p_nxge_dma_pool_t dma_buf_poolp; 1803 p_nxge_dma_pool_t dma_cntl_poolp; 1804 p_nxge_dma_common_t *dma_buf_p; 1805 p_nxge_dma_common_t *dma_cntl_p; 1806 nxge_status_t status = NXGE_OK; 1807 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1808 p_nxge_dma_common_t t_dma_buf_p; 1809 p_nxge_dma_common_t t_dma_cntl_p; 1810 #endif 1811 1812 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 1813 1814 dma_buf_poolp = nxgep->tx_buf_pool_p; 1815 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 1816 1817 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1818 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1819 "==> nxge_map_txdma: buf not allocated")); 1820 return (NXGE_ERROR); 1821 } 1822 1823 ndmas = dma_buf_poolp->ndmas; 1824 if (!ndmas) { 1825 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1826 "<== nxge_map_txdma: no dma allocated")); 1827 return (NXGE_ERROR); 1828 } 1829 1830 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 1831 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1832 1833 tx_rings = (p_tx_rings_t) 1834 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 1835 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC( 1836 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP); 1837 1838 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1839 "tx_rings $%p tx_desc_rings $%p", 1840 tx_rings, tx_desc_rings)); 1841 1842 tx_mbox_areas_p = (p_tx_mbox_areas_t) 1843 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 1844 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC( 1845 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP); 1846 1847 /* 1848 * Map descriptors from the buffer pools for each dma channel. 1849 */ 1850 for (i = 0; i < ndmas; i++) { 1851 /* 1852 * Set up and prepare buffer blocks, descriptors 1853 * and mailbox. 
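		 * On sun4v N2-NIU configurations (NIU_LP_WORKAROUND), this
		 * loop also records the physical base addresses and sizes of
		 * each channel's buffer and control areas in the ring's
		 * hv_tx_buf_* and hv_tx_cntl_* fields, presumably for the
		 * hypervisor logical-page setup performed elsewhere.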
1854 */ 1855 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1856 status = nxge_map_txdma_channel(nxgep, channel, 1857 (p_nxge_dma_common_t *)&dma_buf_p[i], 1858 (p_tx_ring_t *)&tx_desc_rings[i], 1859 dma_buf_poolp->num_chunks[i], 1860 (p_nxge_dma_common_t *)&dma_cntl_p[i], 1861 (p_tx_mbox_t *)&tx_mbox_p[i]); 1862 if (status != NXGE_OK) { 1863 goto nxge_map_txdma_fail1; 1864 } 1865 tx_desc_rings[i]->index = (uint16_t)i; 1866 tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i]; 1867 1868 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1869 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 1870 tx_desc_rings[i]->hv_set = B_FALSE; 1871 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 1872 t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i]; 1873 1874 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp = 1875 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 1876 tx_desc_rings[i]->hv_tx_buf_ioaddr_size = 1877 (uint64_t)t_dma_buf_p->orig_alength; 1878 1879 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1880 "==> nxge_map_txdma_channel: " 1881 "hv data buf base io $%p " 1882 "size 0x%llx (%d) " 1883 "buf base io $%p " 1884 "orig vatopa base io $%p " 1885 "orig_len 0x%llx (%d)", 1886 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp, 1887 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1888 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1889 t_dma_buf_p->ioaddr_pp, 1890 t_dma_buf_p->orig_vatopa, 1891 t_dma_buf_p->orig_alength, 1892 t_dma_buf_p->orig_alength)); 1893 1894 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp = 1895 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 1896 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size = 1897 (uint64_t)t_dma_cntl_p->orig_alength; 1898 1899 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1900 "==> nxge_map_txdma_channel: " 1901 "hv cntl base io $%p " 1902 "orig ioaddr_pp ($%p) " 1903 "orig vatopa ($%p) " 1904 "size 0x%llx (%d 0x%x)", 1905 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp, 1906 t_dma_cntl_p->orig_ioaddr_pp, 1907 t_dma_cntl_p->orig_vatopa, 1908 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size, 1909 t_dma_cntl_p->orig_alength, 1910 t_dma_cntl_p->orig_alength)); 1911 } 1912 #endif 1913 } 1914 1915 tx_rings->ndmas = ndmas; 1916 tx_rings->rings = tx_desc_rings; 1917 nxgep->tx_rings = tx_rings; 1918 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p; 1919 nxgep->tx_mbox_areas_p = tx_mbox_areas_p; 1920 1921 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1922 "tx_rings $%p rings $%p", 1923 nxgep->tx_rings, nxgep->tx_rings->rings)); 1924 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1925 "tx_rings $%p tx_desc_rings $%p", 1926 nxgep->tx_rings, tx_desc_rings)); 1927 1928 goto nxge_map_txdma_exit; 1929 1930 nxge_map_txdma_fail1: 1931 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1932 "==> nxge_map_txdma: uninit tx desc " 1933 "(status 0x%x channel %d i %d)", 1934 nxgep, status, channel, i)); 1935 i--; 1936 for (; i >= 0; i--) { 1937 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1938 nxge_unmap_txdma_channel(nxgep, channel, 1939 tx_desc_rings[i], 1940 tx_mbox_p[i]); 1941 } 1942 1943 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1944 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1945 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1946 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1947 1948 nxge_map_txdma_exit: 1949 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1950 "==> nxge_map_txdma: " 1951 "(status 0x%x channel %d)", 1952 status, channel)); 1953 1954 return (status); 1955 } 1956 1957 static void 1958 nxge_unmap_txdma(p_nxge_t nxgep) 1959 { 1960 int i, ndmas; 1961 uint8_t channel; 1962 p_tx_rings_t tx_rings; 1963 
p_tx_ring_t *tx_desc_rings; 1964 p_tx_mbox_areas_t tx_mbox_areas_p; 1965 p_tx_mbox_t *tx_mbox_p; 1966 p_nxge_dma_pool_t dma_buf_poolp; 1967 1968 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma")); 1969 1970 dma_buf_poolp = nxgep->tx_buf_pool_p; 1971 if (!dma_buf_poolp->buf_allocated) { 1972 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1973 "==> nxge_unmap_txdma: buf not allocated")); 1974 return; 1975 } 1976 1977 ndmas = dma_buf_poolp->ndmas; 1978 if (!ndmas) { 1979 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1980 "<== nxge_unmap_txdma: no dma allocated")); 1981 return; 1982 } 1983 1984 tx_rings = nxgep->tx_rings; 1985 tx_desc_rings = tx_rings->rings; 1986 if (tx_rings == NULL) { 1987 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1988 "<== nxge_unmap_txdma: NULL ring pointer")); 1989 return; 1990 } 1991 1992 tx_desc_rings = tx_rings->rings; 1993 if (tx_desc_rings == NULL) { 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1995 "<== nxge_unmap_txdma: NULL ring pointers")); 1996 return; 1997 } 1998 1999 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: " 2000 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2001 tx_rings, tx_desc_rings, ndmas)); 2002 2003 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2004 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2005 2006 for (i = 0; i < ndmas; i++) { 2007 channel = tx_desc_rings[i]->tdc; 2008 (void) nxge_unmap_txdma_channel(nxgep, channel, 2009 (p_tx_ring_t)tx_desc_rings[i], 2010 (p_tx_mbox_t)tx_mbox_p[i]); 2011 } 2012 2013 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 2014 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 2015 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 2016 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 2017 2018 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2019 "<== nxge_unmap_txdma")); 2020 } 2021 2022 static nxge_status_t 2023 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2024 p_nxge_dma_common_t *dma_buf_p, 2025 p_tx_ring_t *tx_desc_p, 2026 uint32_t num_chunks, 2027 p_nxge_dma_common_t *dma_cntl_p, 2028 p_tx_mbox_t *tx_mbox_p) 2029 { 2030 int status = NXGE_OK; 2031 2032 /* 2033 * Set up and prepare buffer blocks, descriptors 2034 * and mailbox. 2035 */ 2036 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2037 "==> nxge_map_txdma_channel (channel %d)", channel)); 2038 /* 2039 * Transmit buffer blocks 2040 */ 2041 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2042 dma_buf_p, tx_desc_p, num_chunks); 2043 if (status != NXGE_OK) { 2044 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2045 "==> nxge_map_txdma_channel (channel %d): " 2046 "map buffer failed 0x%x", channel, status)); 2047 goto nxge_map_txdma_channel_exit; 2048 } 2049 2050 /* 2051 * Transmit block ring, and mailbox. 2052 */ 2053 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2054 tx_mbox_p); 2055 2056 goto nxge_map_txdma_channel_exit; 2057 2058 nxge_map_txdma_channel_fail1: 2059 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2060 "==> nxge_map_txdma_channel: unmap buf" 2061 "(status 0x%x channel %d)", 2062 status, channel)); 2063 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2064 2065 nxge_map_txdma_channel_exit: 2066 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2067 "<== nxge_map_txdma_channel: " 2068 "(status 0x%x channel %d)", 2069 status, channel)); 2070 2071 return (status); 2072 } 2073 2074 /*ARGSUSED*/ 2075 static void 2076 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2077 p_tx_ring_t tx_ring_p, 2078 p_tx_mbox_t tx_mbox_p) 2079 { 2080 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2081 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2082 /* 2083 * unmap tx block ring, and mailbox. 
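 * The control state (descriptor ring configuration and mailbox) is
 * torn down first, then the per-packet buffer ring; this is the
 * reverse of the setup order used by nxge_map_txdma_channel() above.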
2084 */ 2085 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, 2086 tx_ring_p, tx_mbox_p); 2087 2088 /* unmap buffer blocks */ 2089 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p); 2090 2091 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2092 } 2093 2094 /*ARGSUSED*/ 2095 static void 2096 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2097 p_nxge_dma_common_t *dma_cntl_p, 2098 p_tx_ring_t tx_ring_p, 2099 p_tx_mbox_t *tx_mbox_p) 2100 { 2101 p_tx_mbox_t mboxp; 2102 p_nxge_dma_common_t cntl_dmap; 2103 p_nxge_dma_common_t dmap; 2104 p_tx_rng_cfig_t tx_ring_cfig_p; 2105 p_tx_ring_kick_t tx_ring_kick_p; 2106 p_tx_cs_t tx_cs_p; 2107 p_tx_dma_ent_msk_t tx_evmask_p; 2108 p_txdma_mbh_t mboxh_p; 2109 p_txdma_mbl_t mboxl_p; 2110 uint64_t tx_desc_len; 2111 2112 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2113 "==> nxge_map_txdma_channel_cfg_ring")); 2114 2115 cntl_dmap = *dma_cntl_p; 2116 2117 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2118 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2119 sizeof (tx_desc_t)); 2120 /* 2121 * Zero out transmit ring descriptors. 2122 */ 2123 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2124 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2125 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2126 tx_cs_p = &(tx_ring_p->tx_cs); 2127 tx_evmask_p = &(tx_ring_p->tx_evmask); 2128 tx_ring_cfig_p->value = 0; 2129 tx_ring_kick_p->value = 0; 2130 tx_cs_p->value = 0; 2131 tx_evmask_p->value = 0; 2132 2133 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2134 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2135 dma_channel, 2136 dmap->dma_cookie.dmac_laddress)); 2137 2138 tx_ring_cfig_p->value = 0; 2139 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2140 tx_ring_cfig_p->value = 2141 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2142 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2143 2144 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2145 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2146 dma_channel, 2147 tx_ring_cfig_p->value)); 2148 2149 tx_cs_p->bits.ldw.rst = 1; 2150 2151 /* Map in mailbox */ 2152 mboxp = (p_tx_mbox_t) 2153 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2154 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2155 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2156 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2157 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2158 mboxh_p->value = mboxl_p->value = 0; 2159 2160 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2161 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2162 dmap->dma_cookie.dmac_laddress)); 2163 2164 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2165 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2166 2167 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2168 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2169 2170 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2171 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2172 dmap->dma_cookie.dmac_laddress)); 2173 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2174 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2175 "mbox $%p", 2176 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2177 tx_ring_p->page_valid.value = 0; 2178 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2179 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2180 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2181 tx_ring_p->page_hdl.value = 0; 2182 2183 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2184 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2185 2186 
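	/*
	 * Both logical pages are marked valid for this channel; the
	 * maximum DMA burst size is set to the driver default below.
	 *
	 * Note on the ring configuration programmed above: the length
	 * field is the ring size divided by 8 (tx_ring_size >> 3), so a
	 * 1024-entry ring, for example, is programmed with a length
	 * value of 128.
	 */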
tx_ring_p->max_burst.value = 0; 2187 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2188 2189 *tx_mbox_p = mboxp; 2190 2191 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2192 "<== nxge_map_txdma_channel_cfg_ring")); 2193 } 2194 2195 /*ARGSUSED*/ 2196 static void 2197 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2198 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2199 { 2200 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2201 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2202 tx_ring_p->tdc)); 2203 2204 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2205 2206 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2207 "<== nxge_unmap_txdma_channel_cfg_ring")); 2208 } 2209 2210 static nxge_status_t 2211 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2212 p_nxge_dma_common_t *dma_buf_p, 2213 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2214 { 2215 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2216 p_nxge_dma_common_t dmap; 2217 nxge_os_dma_handle_t tx_buf_dma_handle; 2218 p_tx_ring_t tx_ring_p; 2219 p_tx_msg_t tx_msg_ring; 2220 nxge_status_t status = NXGE_OK; 2221 int ddi_status = DDI_SUCCESS; 2222 int i, j, index; 2223 uint32_t size, bsize; 2224 uint32_t nblocks, nmsgs; 2225 2226 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2227 "==> nxge_map_txdma_channel_buf_ring")); 2228 2229 dma_bufp = tmp_bufp = *dma_buf_p; 2230 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2231 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2232 "chunks bufp $%p", 2233 channel, num_chunks, dma_bufp)); 2234 2235 nmsgs = 0; 2236 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2237 nmsgs += tmp_bufp->nblocks; 2238 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2239 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2240 "bufp $%p nblocks %d nmsgs %d", 2241 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2242 } 2243 if (!nmsgs) { 2244 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2245 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2246 "no msg blocks", 2247 channel)); 2248 status = NXGE_ERROR; 2249 goto nxge_map_txdma_channel_buf_ring_exit; 2250 } 2251 2252 tx_ring_p = (p_tx_ring_t) 2253 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2254 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2255 (void *)nxgep->interrupt_cookie); 2256 2257 tx_ring_p->nxgep = nxgep; 2258 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2259 nxge_serial_tx, tx_ring_p); 2260 /* 2261 * Allocate transmit message rings and handles for packets 2262 * not to be copied to premapped buffers. 
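 * One tx_msg_t entry is allocated per descriptor (nmsgs in total), and
 * each entry is given its own DDI DMA handle so that such packets can
 * be bound directly for transmission (the USE_DMA case, unbound again
 * in nxge_unmap_txdma_channel_buf_ring() below).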
2263 */ 2264 size = nmsgs * sizeof (tx_msg_t); 2265 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2266 for (i = 0; i < nmsgs; i++) { 2267 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2268 DDI_DMA_DONTWAIT, 0, 2269 &tx_msg_ring[i].dma_handle); 2270 if (ddi_status != DDI_SUCCESS) { 2271 status |= NXGE_DDI_FAILED; 2272 break; 2273 } 2274 } 2275 if (i < nmsgs) { 2276 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2277 "Allocate handles failed.")); 2278 goto nxge_map_txdma_channel_buf_ring_fail1; 2279 } 2280 2281 tx_ring_p->tdc = channel; 2282 tx_ring_p->tx_msg_ring = tx_msg_ring; 2283 tx_ring_p->tx_ring_size = nmsgs; 2284 tx_ring_p->num_chunks = num_chunks; 2285 if (!nxge_tx_intr_thres) { 2286 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2287 } 2288 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2289 tx_ring_p->rd_index = 0; 2290 tx_ring_p->wr_index = 0; 2291 tx_ring_p->ring_head.value = 0; 2292 tx_ring_p->ring_kick_tail.value = 0; 2293 tx_ring_p->descs_pending = 0; 2294 2295 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2296 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2297 "actual tx desc max %d nmsgs %d " 2298 "(config nxge_tx_ring_size %d)", 2299 channel, tx_ring_p->tx_ring_size, nmsgs, 2300 nxge_tx_ring_size)); 2301 2302 /* 2303 * Map in buffers from the buffer pool. 2304 */ 2305 index = 0; 2306 bsize = dma_bufp->block_size; 2307 2308 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2309 "dma_bufp $%p tx_rng_p $%p " 2310 "tx_msg_rng_p $%p bsize %d", 2311 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2312 2313 tx_buf_dma_handle = dma_bufp->dma_handle; 2314 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2315 bsize = dma_bufp->block_size; 2316 nblocks = dma_bufp->nblocks; 2317 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2318 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2319 "size %d dma_bufp $%p", 2320 i, sizeof (nxge_dma_common_t), dma_bufp)); 2321 2322 for (j = 0; j < nblocks; j++) { 2323 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2324 dmap = &tx_msg_ring[index++].buf_dma; 2325 #ifdef TX_MEM_DEBUG 2326 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2327 "==> nxge_map_txdma_channel_buf_ring: j %d" 2328 "dmap $%p", i, dmap)); 2329 #endif 2330 nxge_setup_dma_common(dmap, dma_bufp, 1, 2331 bsize); 2332 } 2333 } 2334 2335 if (i < num_chunks) { 2336 status = NXGE_ERROR; 2337 goto nxge_map_txdma_channel_buf_ring_fail1; 2338 } 2339 2340 *tx_desc_p = tx_ring_p; 2341 2342 goto nxge_map_txdma_channel_buf_ring_exit; 2343 2344 nxge_map_txdma_channel_buf_ring_fail1: 2345 if (tx_ring_p->serial) { 2346 nxge_serialize_destroy(tx_ring_p->serial); 2347 tx_ring_p->serial = NULL; 2348 } 2349 2350 index--; 2351 for (; index >= 0; index--) { 2352 if (tx_msg_ring[index].dma_handle != NULL) { 2353 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2354 } 2355 } 2356 MUTEX_DESTROY(&tx_ring_p->lock); 2357 KMEM_FREE(tx_msg_ring, size); 2358 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2359 2360 status = NXGE_ERROR; 2361 2362 nxge_map_txdma_channel_buf_ring_exit: 2363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2364 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2365 2366 return (status); 2367 } 2368 2369 /*ARGSUSED*/ 2370 static void 2371 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2372 { 2373 p_tx_msg_t tx_msg_ring; 2374 p_tx_msg_t tx_msg_p; 2375 int i; 2376 2377 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2378 "==> nxge_unmap_txdma_channel_buf_ring")); 2379 if (tx_ring_p == NULL) { 2380 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2381 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2382 return; 2383 } 2384 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2385 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2386 tx_ring_p->tdc)); 2387 2388 tx_msg_ring = tx_ring_p->tx_msg_ring; 2389 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2390 tx_msg_p = &tx_msg_ring[i]; 2391 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2392 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2393 "entry = %d", 2394 i)); 2395 (void) dvma_unload(tx_msg_p->dvma_handle, 2396 0, -1); 2397 tx_msg_p->dvma_handle = NULL; 2398 if (tx_ring_p->dvma_wr_index == 2399 tx_ring_p->dvma_wrap_mask) { 2400 tx_ring_p->dvma_wr_index = 0; 2401 } else { 2402 tx_ring_p->dvma_wr_index++; 2403 } 2404 tx_ring_p->dvma_pending--; 2405 } else if (tx_msg_p->flags.dma_type == 2406 USE_DMA) { 2407 if (ddi_dma_unbind_handle 2408 (tx_msg_p->dma_handle)) { 2409 cmn_err(CE_WARN, "!nxge_unmap_tx_bug_ring: " 2410 "ddi_dma_unbind_handle " 2411 "failed."); 2412 } 2413 } 2414 2415 if (tx_msg_p->tx_message != NULL) { 2416 freemsg(tx_msg_p->tx_message); 2417 tx_msg_p->tx_message = NULL; 2418 } 2419 } 2420 2421 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2422 if (tx_msg_ring[i].dma_handle != NULL) { 2423 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2424 } 2425 } 2426 2427 if (tx_ring_p->serial) { 2428 nxge_serialize_destroy(tx_ring_p->serial); 2429 tx_ring_p->serial = NULL; 2430 } 2431 2432 MUTEX_DESTROY(&tx_ring_p->lock); 2433 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2434 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2435 2436 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2437 "<== nxge_unmap_txdma_channel_buf_ring")); 2438 } 2439 2440 static nxge_status_t 2441 nxge_txdma_hw_start(p_nxge_t nxgep) 2442 { 2443 int i, ndmas; 2444 uint16_t channel; 2445 p_tx_rings_t tx_rings; 2446 p_tx_ring_t *tx_desc_rings; 2447 p_tx_mbox_areas_t tx_mbox_areas_p; 2448 p_tx_mbox_t *tx_mbox_p; 2449 nxge_status_t status = NXGE_OK; 2450 2451 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2452 2453 tx_rings = nxgep->tx_rings; 2454 if (tx_rings == NULL) { 2455 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2456 "<== nxge_txdma_hw_start: NULL ring pointer")); 2457 return (NXGE_ERROR); 2458 } 2459 tx_desc_rings = tx_rings->rings; 2460 if (tx_desc_rings == NULL) { 2461 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2462 "<== nxge_txdma_hw_start: NULL ring pointers")); 2463 return (NXGE_ERROR); 2464 } 2465 2466 ndmas = tx_rings->ndmas; 2467 if (!ndmas) { 2468 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2469 "<== nxge_txdma_hw_start: no dma channel allocated")); 2470 return (NXGE_ERROR); 2471 } 2472 2473 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2474 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2475 tx_rings, tx_desc_rings, ndmas)); 2476 2477 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2478 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2479 2480 for (i = 0; i < ndmas; i++) { 2481 channel = tx_desc_rings[i]->tdc, 2482 status = nxge_txdma_start_channel(nxgep, channel, 2483 (p_tx_ring_t)tx_desc_rings[i], 2484 (p_tx_mbox_t)tx_mbox_p[i]); 2485 if (status != NXGE_OK) { 2486 goto nxge_txdma_hw_start_fail1; 2487 } 2488 } 2489 2490 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2491 "tx_rings $%p rings $%p", 2492 nxgep->tx_rings, nxgep->tx_rings->rings)); 2493 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2494 "tx_rings $%p tx_desc_rings $%p", 2495 nxgep->tx_rings, tx_desc_rings)); 2496 2497 goto nxge_txdma_hw_start_exit; 2498 2499 nxge_txdma_hw_start_fail1: 2500 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2501 "==> nxge_txdma_hw_start: disable " 2502 "(status 0x%x channel %d 
i %d)", status, channel, i)); 2503 for (; i >= 0; i--) { 2504 channel = tx_desc_rings[i]->tdc, 2505 (void) nxge_txdma_stop_channel(nxgep, channel, 2506 (p_tx_ring_t)tx_desc_rings[i], 2507 (p_tx_mbox_t)tx_mbox_p[i]); 2508 } 2509 2510 nxge_txdma_hw_start_exit: 2511 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2512 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2513 2514 return (status); 2515 } 2516 2517 static void 2518 nxge_txdma_hw_stop(p_nxge_t nxgep) 2519 { 2520 int i, ndmas; 2521 uint16_t channel; 2522 p_tx_rings_t tx_rings; 2523 p_tx_ring_t *tx_desc_rings; 2524 p_tx_mbox_areas_t tx_mbox_areas_p; 2525 p_tx_mbox_t *tx_mbox_p; 2526 2527 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop")); 2528 2529 tx_rings = nxgep->tx_rings; 2530 if (tx_rings == NULL) { 2531 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2532 "<== nxge_txdma_hw_stop: NULL ring pointer")); 2533 return; 2534 } 2535 tx_desc_rings = tx_rings->rings; 2536 if (tx_desc_rings == NULL) { 2537 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2538 "<== nxge_txdma_hw_stop: NULL ring pointers")); 2539 return; 2540 } 2541 2542 ndmas = tx_rings->ndmas; 2543 if (!ndmas) { 2544 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2545 "<== nxge_txdma_hw_stop: no dma channel allocated")); 2546 return; 2547 } 2548 2549 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2550 "tx_rings $%p tx_desc_rings $%p", 2551 tx_rings, tx_desc_rings)); 2552 2553 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2554 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2555 2556 for (i = 0; i < ndmas; i++) { 2557 channel = tx_desc_rings[i]->tdc; 2558 (void) nxge_txdma_stop_channel(nxgep, channel, 2559 (p_tx_ring_t)tx_desc_rings[i], 2560 (p_tx_mbox_t)tx_mbox_p[i]); 2561 } 2562 2563 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2564 "tx_rings $%p tx_desc_rings $%p", 2565 tx_rings, tx_desc_rings)); 2566 2567 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop")); 2568 } 2569 2570 static nxge_status_t 2571 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2572 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2573 2574 { 2575 nxge_status_t status = NXGE_OK; 2576 2577 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2578 "==> nxge_txdma_start_channel (channel %d)", channel)); 2579 /* 2580 * TXDMA/TXC must be in stopped state. 2581 */ 2582 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2583 2584 /* 2585 * Reset TXDMA channel 2586 */ 2587 tx_ring_p->tx_cs.value = 0; 2588 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2589 status = nxge_reset_txdma_channel(nxgep, channel, 2590 tx_ring_p->tx_cs.value); 2591 if (status != NXGE_OK) { 2592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2593 "==> nxge_txdma_start_channel (channel %d)" 2594 " reset channel failed 0x%x", channel, status)); 2595 goto nxge_txdma_start_channel_exit; 2596 } 2597 2598 /* 2599 * Initialize the TXDMA channel specific FZC control 2600 * configurations. These FZC registers are pertaining 2601 * to each TX channel (i.e. logical pages). 2602 */ 2603 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2604 tx_ring_p, tx_mbox_p); 2605 if (status != NXGE_OK) { 2606 goto nxge_txdma_start_channel_exit; 2607 } 2608 2609 /* 2610 * Initialize the event masks. 2611 */ 2612 tx_ring_p->tx_evmask.value = 0; 2613 status = nxge_init_txdma_channel_event_mask(nxgep, 2614 channel, &tx_ring_p->tx_evmask); 2615 if (status != NXGE_OK) { 2616 goto nxge_txdma_start_channel_exit; 2617 } 2618 2619 /* 2620 * Load TXDMA descriptors, buffers, mailbox, 2621 * initialise the DMA channels and 2622 * enable each DMA channel. 
2623 */ 2624 status = nxge_enable_txdma_channel(nxgep, channel, 2625 tx_ring_p, tx_mbox_p); 2626 if (status != NXGE_OK) { 2627 goto nxge_txdma_start_channel_exit; 2628 } 2629 2630 nxge_txdma_start_channel_exit: 2631 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2632 2633 return (status); 2634 } 2635 2636 /*ARGSUSED*/ 2637 static nxge_status_t 2638 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel, 2639 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2640 { 2641 int status = NXGE_OK; 2642 2643 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2644 "==> nxge_txdma_stop_channel: channel %d", channel)); 2645 2646 /* 2647 * Stop (disable) TXDMA and TXC (if stop bit is set 2648 * and STOP_N_GO bit not set, the TXDMA reset state will 2649 * not be set if reset TXDMA. 2650 */ 2651 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2652 2653 /* 2654 * Reset TXDMA channel 2655 */ 2656 tx_ring_p->tx_cs.value = 0; 2657 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2658 status = nxge_reset_txdma_channel(nxgep, channel, 2659 tx_ring_p->tx_cs.value); 2660 if (status != NXGE_OK) { 2661 goto nxge_txdma_stop_channel_exit; 2662 } 2663 2664 #ifdef HARDWARE_REQUIRED 2665 /* Set up the interrupt event masks. */ 2666 tx_ring_p->tx_evmask.value = 0; 2667 status = nxge_init_txdma_channel_event_mask(nxgep, 2668 channel, &tx_ring_p->tx_evmask); 2669 if (status != NXGE_OK) { 2670 goto nxge_txdma_stop_channel_exit; 2671 } 2672 2673 /* Initialize the DMA control and status register */ 2674 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2675 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2676 tx_ring_p->tx_cs.value); 2677 if (status != NXGE_OK) { 2678 goto nxge_txdma_stop_channel_exit; 2679 } 2680 2681 /* Disable channel */ 2682 status = nxge_disable_txdma_channel(nxgep, channel, 2683 tx_ring_p, tx_mbox_p); 2684 if (status != NXGE_OK) { 2685 goto nxge_txdma_start_channel_exit; 2686 } 2687 2688 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2689 "==> nxge_txdma_stop_channel: event done")); 2690 2691 #endif 2692 2693 nxge_txdma_stop_channel_exit: 2694 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2695 return (status); 2696 } 2697 2698 static p_tx_ring_t 2699 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2700 { 2701 int index, ndmas; 2702 uint16_t tdc; 2703 p_tx_rings_t tx_rings; 2704 2705 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2706 2707 tx_rings = nxgep->tx_rings; 2708 if (tx_rings == NULL) { 2709 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2710 "<== nxge_txdma_get_ring: NULL ring pointer")); 2711 return (NULL); 2712 } 2713 2714 ndmas = tx_rings->ndmas; 2715 if (!ndmas) { 2716 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2717 "<== nxge_txdma_get_ring: no channel allocated")); 2718 return (NULL); 2719 } 2720 2721 if (tx_rings->rings == NULL) { 2722 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2723 "<== nxge_txdma_get_ring: NULL rings pointer")); 2724 return (NULL); 2725 } 2726 2727 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: " 2728 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2729 tx_rings, tx_rings, ndmas)); 2730 2731 for (index = 0; index < ndmas; index++) { 2732 tdc = tx_rings->rings[index]->tdc; 2733 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2734 "==> nxge_fixup_txdma_rings: channel %d", tdc)); 2735 if (channel == tdc) { 2736 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2737 "<== nxge_txdma_get_ring: tdc %d " 2738 "ring $%p", 2739 tdc, tx_rings->rings[index])); 2740 return (p_tx_ring_t)(tx_rings->rings[index]); 2741 } 2742 } 2743 2744 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring")); 2745 return (NULL); 2746 } 2747 2748 
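/*
 * nxge_txdma_get_mbox
 *
 *	Return the mailbox area associated with a TDC channel.  The ring
 *	list is scanned for a ring whose channel number matches, and the
 *	mailbox at the same index is returned; NULL is returned if the
 *	rings or mailbox areas are not set up or the channel is not found.
 *	The fatal-error recovery paths below use this to pair a ring with
 *	its mailbox before re-programming the channel.
 */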
static p_tx_mbox_t 2749 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 2750 { 2751 int index, tdc, ndmas; 2752 p_tx_rings_t tx_rings; 2753 p_tx_mbox_areas_t tx_mbox_areas_p; 2754 p_tx_mbox_t *tx_mbox_p; 2755 2756 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 2757 2758 tx_rings = nxgep->tx_rings; 2759 if (tx_rings == NULL) { 2760 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2761 "<== nxge_txdma_get_mbox: NULL ring pointer")); 2762 return (NULL); 2763 } 2764 2765 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2766 if (tx_mbox_areas_p == NULL) { 2767 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2768 "<== nxge_txdma_get_mbox: NULL mbox pointer")); 2769 return (NULL); 2770 } 2771 2772 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2773 2774 ndmas = tx_rings->ndmas; 2775 if (!ndmas) { 2776 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2777 "<== nxge_txdma_get_mbox: no channel allocated")); 2778 return (NULL); 2779 } 2780 2781 if (tx_rings->rings == NULL) { 2782 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2783 "<== nxge_txdma_get_mbox: NULL rings pointer")); 2784 return (NULL); 2785 } 2786 2787 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: " 2788 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2789 tx_rings, tx_rings, ndmas)); 2790 2791 for (index = 0; index < ndmas; index++) { 2792 tdc = tx_rings->rings[index]->tdc; 2793 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2794 "==> nxge_txdma_get_mbox: channel %d", tdc)); 2795 if (channel == tdc) { 2796 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2797 "<== nxge_txdma_get_mbox: tdc %d " 2798 "ring $%p", 2799 tdc, tx_rings->rings[index])); 2800 return (p_tx_mbox_t)(tx_mbox_p[index]); 2801 } 2802 } 2803 2804 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox")); 2805 return (NULL); 2806 } 2807 2808 /*ARGSUSED*/ 2809 static nxge_status_t 2810 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 2811 { 2812 npi_handle_t handle; 2813 npi_status_t rs; 2814 uint8_t channel; 2815 p_tx_ring_t *tx_rings; 2816 p_tx_ring_t tx_ring_p; 2817 p_nxge_tx_ring_stats_t tdc_stats; 2818 boolean_t txchan_fatal = B_FALSE; 2819 nxge_status_t status = NXGE_OK; 2820 tdmc_inj_par_err_t par_err; 2821 uint32_t value; 2822 2823 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts")); 2824 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2825 channel = ldvp->channel; 2826 2827 tx_rings = nxgep->tx_rings->rings; 2828 tx_ring_p = tx_rings[index]; 2829 tdc_stats = tx_ring_p->tdc_stats; 2830 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 2831 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 2832 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 2833 if ((rs = npi_txdma_ring_error_get(handle, channel, 2834 &tdc_stats->errlog)) != NPI_SUCCESS) 2835 return (NXGE_ERROR | rs); 2836 } 2837 2838 if (cs.bits.ldw.mbox_err) { 2839 tdc_stats->mbox_err++; 2840 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2841 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 2842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2843 "==> nxge_tx_err_evnts(channel %d): " 2844 "fatal error: mailbox", channel)); 2845 txchan_fatal = B_TRUE; 2846 } 2847 if (cs.bits.ldw.pkt_size_err) { 2848 tdc_stats->pkt_size_err++; 2849 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2850 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2851 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2852 "==> nxge_tx_err_evnts(channel %d): " 2853 "fatal error: pkt_size_err", channel)); 2854 txchan_fatal = B_TRUE; 2855 } 2856 if (cs.bits.ldw.tx_ring_oflow) { 2857 tdc_stats->tx_ring_oflow++; 2858 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2859 
NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 2860 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2861 "==> nxge_tx_err_evnts(channel %d): " 2862 "fatal error: tx_ring_oflow", channel)); 2863 txchan_fatal = B_TRUE; 2864 } 2865 if (cs.bits.ldw.pref_buf_par_err) { 2866 tdc_stats->pre_buf_par_err++; 2867 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2868 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 2869 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2870 "==> nxge_tx_err_evnts(channel %d): " 2871 "fatal error: pre_buf_par_err", channel)); 2872 /* Clear error injection source for parity error */ 2873 (void) npi_txdma_inj_par_error_get(handle, &value); 2874 par_err.value = value; 2875 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 2876 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 2877 txchan_fatal = B_TRUE; 2878 } 2879 if (cs.bits.ldw.nack_pref) { 2880 tdc_stats->nack_pref++; 2881 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2882 NXGE_FM_EREPORT_TDMC_NACK_PREF); 2883 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2884 "==> nxge_tx_err_evnts(channel %d): " 2885 "fatal error: nack_pref", channel)); 2886 txchan_fatal = B_TRUE; 2887 } 2888 if (cs.bits.ldw.nack_pkt_rd) { 2889 tdc_stats->nack_pkt_rd++; 2890 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2891 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 2892 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2893 "==> nxge_tx_err_evnts(channel %d): " 2894 "fatal error: nack_pkt_rd", channel)); 2895 txchan_fatal = B_TRUE; 2896 } 2897 if (cs.bits.ldw.conf_part_err) { 2898 tdc_stats->conf_part_err++; 2899 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2900 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 2901 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2902 "==> nxge_tx_err_evnts(channel %d): " 2903 "fatal error: config_partition_err", channel)); 2904 txchan_fatal = B_TRUE; 2905 } 2906 if (cs.bits.ldw.pkt_prt_err) { 2907 tdc_stats->pkt_part_err++; 2908 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2909 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 2910 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2911 "==> nxge_tx_err_evnts(channel %d): " 2912 "fatal error: pkt_prt_err", channel)); 2913 txchan_fatal = B_TRUE; 2914 } 2915 2916 /* Clear error injection source in case this is an injected error */ 2917 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 2918 2919 if (txchan_fatal) { 2920 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2921 " nxge_tx_err_evnts: " 2922 " fatal error on channel %d cs 0x%llx\n", 2923 channel, cs.value)); 2924 status = nxge_txdma_fatal_err_recover(nxgep, channel, 2925 tx_ring_p); 2926 if (status == NXGE_OK) { 2927 FM_SERVICE_RESTORED(nxgep); 2928 } 2929 } 2930 2931 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts")); 2932 2933 return (status); 2934 } 2935 2936 static nxge_status_t 2937 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel, 2938 p_tx_ring_t tx_ring_p) 2939 { 2940 npi_handle_t handle; 2941 npi_status_t rs = NPI_SUCCESS; 2942 p_tx_mbox_t tx_mbox_p; 2943 nxge_status_t status = NXGE_OK; 2944 2945 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 2946 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2947 "Recovering from TxDMAChannel#%d error...", channel)); 2948 2949 /* 2950 * Stop the dma channel waits for the stop done. 2951 * If the stop done bit is not set, then create 2952 * an error. 
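 * The full recovery sequence is: stop the channel, reclaim completed
 * descriptors, reset the channel, clear the kick (tail) register, then
 * re-program the FZC (logical page) registers and the event mask,
 * clear the ring indexes, and finally re-enable the channel.  All of
 * this is done while holding the ring lock.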
2953 */ 2954 2955 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2956 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 2957 MUTEX_ENTER(&tx_ring_p->lock); 2958 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 2959 if (rs != NPI_SUCCESS) { 2960 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2961 "==> nxge_txdma_fatal_err_recover (channel %d): " 2962 "stop failed ", channel)); 2963 goto fail; 2964 } 2965 2966 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 2967 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2968 2969 /* 2970 * Reset TXDMA channel 2971 */ 2972 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 2973 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2974 NPI_SUCCESS) { 2975 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2976 "==> nxge_txdma_fatal_err_recover (channel %d)" 2977 " reset channel failed 0x%x", channel, rs)); 2978 goto fail; 2979 } 2980 2981 /* 2982 * Reset the tail (kick) register to 0. 2983 * (Hardware will not reset it. Tx overflow fatal 2984 * error if tail is not set to 0 after reset! 2985 */ 2986 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2987 2988 /* Restart TXDMA channel */ 2989 2990 /* 2991 * Initialize the TXDMA channel specific FZC control 2992 * configurations. These FZC registers are pertaining 2993 * to each TX channel (i.e. logical pages). 2994 */ 2995 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 2996 2997 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 2998 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2999 tx_ring_p, tx_mbox_p); 3000 if (status != NXGE_OK) 3001 goto fail; 3002 3003 /* 3004 * Initialize the event masks. 3005 */ 3006 tx_ring_p->tx_evmask.value = 0; 3007 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3008 &tx_ring_p->tx_evmask); 3009 if (status != NXGE_OK) 3010 goto fail; 3011 3012 tx_ring_p->wr_index_wrap = B_FALSE; 3013 tx_ring_p->wr_index = 0; 3014 tx_ring_p->rd_index = 0; 3015 3016 /* 3017 * Load TXDMA descriptors, buffers, mailbox, 3018 * initialise the DMA channels and 3019 * enable each DMA channel. 3020 */ 3021 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3022 status = nxge_enable_txdma_channel(nxgep, channel, 3023 tx_ring_p, tx_mbox_p); 3024 MUTEX_EXIT(&tx_ring_p->lock); 3025 if (status != NXGE_OK) 3026 goto fail; 3027 3028 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3029 "Recovery Successful, TxDMAChannel#%d Restored", 3030 channel)); 3031 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3032 3033 return (NXGE_OK); 3034 3035 fail: 3036 MUTEX_EXIT(&tx_ring_p->lock); 3037 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3038 "nxge_txdma_fatal_err_recover (channel %d): " 3039 "failed to recover this txdma channel", channel)); 3040 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3041 3042 return (status); 3043 } 3044 3045 nxge_status_t 3046 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3047 { 3048 npi_handle_t handle; 3049 npi_status_t rs = NPI_SUCCESS; 3050 nxge_status_t status = NXGE_OK; 3051 p_tx_ring_t *tx_desc_rings; 3052 p_tx_rings_t tx_rings; 3053 p_tx_ring_t tx_ring_p; 3054 p_tx_mbox_t tx_mbox_p; 3055 int i, ndmas; 3056 uint16_t channel; 3057 3058 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3059 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3060 "Recovering from TxPort error...")); 3061 3062 /* 3063 * Stop the dma channel waits for the stop done. 3064 * If the stop done bit is not set, then create 3065 * an error. 
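 * Port-level recovery applies the same stop/reclaim/reset/restart
 * sequence to every TDC.  All ring locks are acquired up front and are
 * only released once every channel has been re-enabled, or when the
 * recovery fails.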
3066 */ 3067 3068 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3069 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels...")); 3070 3071 tx_rings = nxgep->tx_rings; 3072 tx_desc_rings = tx_rings->rings; 3073 ndmas = tx_rings->ndmas; 3074 3075 for (i = 0; i < ndmas; i++) { 3076 if (tx_desc_rings[i] == NULL) { 3077 continue; 3078 } 3079 tx_ring_p = tx_rings->rings[i]; 3080 MUTEX_ENTER(&tx_ring_p->lock); 3081 } 3082 3083 for (i = 0; i < ndmas; i++) { 3084 if (tx_desc_rings[i] == NULL) { 3085 continue; 3086 } 3087 channel = tx_desc_rings[i]->tdc; 3088 tx_ring_p = tx_rings->rings[i]; 3089 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3090 if (rs != NPI_SUCCESS) { 3091 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3092 "==> nxge_txdma_fatal_err_recover (channel %d): " 3093 "stop failed ", channel)); 3094 goto fail; 3095 } 3096 } 3097 3098 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels...")); 3099 3100 for (i = 0; i < ndmas; i++) { 3101 if (tx_desc_rings[i] == NULL) { 3102 continue; 3103 } 3104 tx_ring_p = tx_rings->rings[i]; 3105 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3106 } 3107 3108 /* 3109 * Reset TXDMA channel 3110 */ 3111 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels...")); 3112 3113 for (i = 0; i < ndmas; i++) { 3114 if (tx_desc_rings[i] == NULL) { 3115 continue; 3116 } 3117 channel = tx_desc_rings[i]->tdc; 3118 tx_ring_p = tx_rings->rings[i]; 3119 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, 3120 channel)) != NPI_SUCCESS) { 3121 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3122 "==> nxge_txdma_fatal_err_recover (channel %d)" 3123 " reset channel failed 0x%x", channel, rs)); 3124 goto fail; 3125 } 3126 3127 /* 3128 * Reset the tail (kick) register to 0. 3129 * (Hardware will not reset it. Tx overflow fatal 3130 * error if tail is not set to 0 after reset! 3131 */ 3132 3133 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3134 3135 } 3136 3137 /* 3138 * Initialize the TXDMA channel specific FZC control 3139 * configurations. These FZC registers are pertaining 3140 * to each TX channel (i.e. logical pages). 3141 */ 3142 3143 /* Restart TXDMA channels */ 3144 3145 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels...")); 3146 3147 for (i = 0; i < ndmas; i++) { 3148 if (tx_desc_rings[i] == NULL) { 3149 continue; 3150 } 3151 channel = tx_desc_rings[i]->tdc; 3152 tx_ring_p = tx_rings->rings[i]; 3153 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3154 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3155 tx_ring_p, tx_mbox_p); 3156 tx_ring_p->tx_evmask.value = 0; 3157 /* 3158 * Initialize the event masks. 3159 */ 3160 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3161 &tx_ring_p->tx_evmask); 3162 3163 tx_ring_p->wr_index_wrap = B_FALSE; 3164 tx_ring_p->wr_index = 0; 3165 tx_ring_p->rd_index = 0; 3166 3167 if (status != NXGE_OK) 3168 goto fail; 3169 if (status != NXGE_OK) 3170 goto fail; 3171 } 3172 3173 /* 3174 * Load TXDMA descriptors, buffers, mailbox, 3175 * initialise the DMA channels and 3176 * enable each DMA channel. 
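 * Each channel's mailbox area is looked up again via
 * nxge_txdma_get_mbox() so that nxge_enable_txdma_channel() can reload
 * the descriptor ring and mailbox registers before the channel is
 * re-enabled.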
3177 */ 3178 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels...")); 3179 3180 for (i = 0; i < ndmas; i++) { 3181 if (tx_desc_rings[i] == NULL) { 3182 continue; 3183 } 3184 channel = tx_desc_rings[i]->tdc; 3185 tx_ring_p = tx_rings->rings[i]; 3186 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3187 status = nxge_enable_txdma_channel(nxgep, channel, 3188 tx_ring_p, tx_mbox_p); 3189 if (status != NXGE_OK) 3190 goto fail; 3191 } 3192 3193 for (i = 0; i < ndmas; i++) { 3194 if (tx_desc_rings[i] == NULL) { 3195 continue; 3196 } 3197 tx_ring_p = tx_rings->rings[i]; 3198 MUTEX_EXIT(&tx_ring_p->lock); 3199 } 3200 3201 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3202 "Recovery Successful, TxPort Restored")); 3203 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3204 3205 return (NXGE_OK); 3206 3207 fail: 3208 for (i = 0; i < ndmas; i++) { 3209 if (tx_desc_rings[i] == NULL) { 3210 continue; 3211 } 3212 tx_ring_p = tx_rings->rings[i]; 3213 MUTEX_EXIT(&tx_ring_p->lock); 3214 } 3215 3216 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3217 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3218 "nxge_txdma_fatal_err_recover (channel %d): " 3219 "failed to recover this txdma channel")); 3220 3221 return (status); 3222 } 3223 3224 void 3225 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3226 { 3227 tdmc_intr_dbg_t tdi; 3228 tdmc_inj_par_err_t par_err; 3229 uint32_t value; 3230 npi_handle_t handle; 3231 3232 switch (err_id) { 3233 3234 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3235 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3236 /* Clear error injection source for parity error */ 3237 (void) npi_txdma_inj_par_error_get(handle, &value); 3238 par_err.value = value; 3239 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3240 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3241 3242 par_err.bits.ldw.inject_parity_error = (1 << chan); 3243 (void) npi_txdma_inj_par_error_get(handle, &value); 3244 par_err.value = value; 3245 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3246 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3247 (unsigned long long)par_err.value); 3248 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3249 break; 3250 3251 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3252 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3253 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3254 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3255 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3256 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3257 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3258 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3259 chan, &tdi.value); 3260 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3261 tdi.bits.ldw.pref_buf_par_err = 1; 3262 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3263 tdi.bits.ldw.mbox_err = 1; 3264 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3265 tdi.bits.ldw.nack_pref = 1; 3266 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3267 tdi.bits.ldw.nack_pkt_rd = 1; 3268 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3269 tdi.bits.ldw.pkt_size_err = 1; 3270 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3271 tdi.bits.ldw.tx_ring_oflow = 1; 3272 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3273 tdi.bits.ldw.conf_part_err = 1; 3274 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3275 tdi.bits.ldw.pkt_part_err = 1; 3276 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3277 tdi.value); 3278 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3279 chan, tdi.value); 3280 3281 break; 
3282 } 3283 } 3284