/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/llc1.h>

uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 32;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern int nxge_serial_tx(mblk_t *mp, void *arg);

static nxge_status_t nxge_map_txdma(p_nxge_t);
static void nxge_unmap_txdma(p_nxge_t);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
static void nxge_txdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));

	status = nxge_map_txdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channels: status 0x%x", status));
		return (status);
	}

	status = nxge_txdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_txdma(nxgep);
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channels: status 0x%x", status));

	return (NXGE_OK);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));

	nxge_txdma_hw_stop(nxgep);
	nxge_unmap_txdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channels"));
}

void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t tsize;

	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it. Tx overflow is a fatal
	 * error if the tail is not set to 0 after reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
    boolean_t l4_cksum, int pkt_len, uint8_t npads,
    p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t hdrp;
	p_mblk_t nmp;
	uint64_t tmp;
	size_t mblk_len;
	size_t iph_len;
	size_t hdrs_size;
	uint8_t hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t *cursor;
	uint8_t *ip_buf;
	uint16_t eth_type;
	uint8_t ipproto;
	boolean_t is_vlan = B_FALSE;
	size_t eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM"
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP"
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t status = B_TRUE;
	p_nxge_dma_common_t tx_desc_dma_p;
	nxge_dma_common_t desc_area;
	p_tx_desc_t tx_desc_ring_vp;
	p_tx_desc_t tx_desc_p;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t r_tx_desc;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint32_t pkt_len;
	uint_t tx_rd_index;
	uint16_t head_index, tail_index;
	uint8_t tdc;
	boolean_t head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
		if (status) {
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <=
		    (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));
	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
			    B_TRUE, ldgp->ldg_timer);
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

nxge_status_t
nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	int i, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;
	p_tx_ring_t *tx_desc_rings;
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_mode: enable mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_mode: not initialized"));
		return (NXGE_ERROR);
	}

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL global ring pointer"));
		return (NXGE_ERROR);
	}

	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL rings pointer"));
		return (NXGE_ERROR);
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_txdma_hw_mode: no dma channel allocated"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_desc_rings, ndmas));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_txdma_hw_mode: channel %d", channel));
		if (enable) {
			rs = npi_txdma_channel_enable(handle, channel);
			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_txdma_hw_mode: channel %d (enable) "
			    "rs 0x%x", channel, rs));
		} else {
			/*
			 * Stop the dma channel and wait for the stop-done.
			 * If the stop-done bit is not set, then force
			 * an error so TXC will stop.
			 * All channels bound to this port need to be stopped
			 * and reset after injecting an interrupt error.
			 */
			rs = npi_txdma_channel_disable(handle, channel);
			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_txdma_hw_mode: channel %d (disable) "
			    "rs 0x%x", channel, rs));
			{
				tdmc_intr_dbg_t intr_dbg;

				if (rs != NPI_SUCCESS) {
					/* Inject any error */
					intr_dbg.value = 0;
					intr_dbg.bits.ldw.nack_pref = 1;
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (stop failed 0x%x) "
					    "(inject err)", channel, rs));
					(void) npi_txdma_inj_int_error_set(
					    handle, channel, &intr_dbg);
					rs = npi_txdma_channel_disable(handle,
					    channel);
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (stop again 0x%x) "
					    "(after inject err)",
					    channel, rs));
				}
			}
		}
	}

	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* stop the transmit dma channels */
	(void) npi_txdma_channel_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not set, then create
	 * an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (status == NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    "injecting error", channel, rs));
	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK ", channel));
		return (status);
	}

#if defined(NXGE_DEBUG)
	nxge_txdma_regs_dump_channels(nxgep);
#endif
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
	return (status);
}

void
nxge_hw_start_tx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx"));

	(void) nxge_txdma_hw_start(nxgep);
	(void) nxge_tx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx"));
}

/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

	/*
	 * For each transmit channel, reclaim each descriptor and
	 * free buffers.
	 */
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "<== nxge_fixup_txdma_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_fixup_txdma_rings: channel %d", channel));

		nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		    "==> nxge_txdma_hw_kick: channel %d", channel));
		nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    " nxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

int
nxge_txdma_hung(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;
	p_tx_ring_t tx_ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer"));
		return (B_FALSE);
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: no channel "
		    "allocated"));
		return (B_FALSE);
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL rings pointer"));
		return (B_FALSE);
	}

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		tx_ring_p = tx_rings->rings[index];
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_hung: channel %d", channel));
		if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) {
			return (B_TRUE);
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p,
    uint16_t channel)
{
	uint16_t head_index, tail_index;
	boolean_t head_wrap, tail_wrap;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint_t tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index = tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}

/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: no channel "
		    "allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_fixup_hung_txdma_rings: channel %d",
		    channel));

		nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index],
		    channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}

/*ARGSUSED*/
void
nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_hung_channel: channel not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
}

/*ARGSUSED*/
void
nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: channel "
		    "not matched "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	/* Reclaim descriptors */
	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	MUTEX_EXIT(&ring_p->lock);

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not set, then force
	 * an error.
	 */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	status = npi_txdma_channel_disable(handle, channel);
	if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped again "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
	    "ring tdc %d passed channel %d",
	    ring_p->tdc, channel));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
}

/*ARGSUSED*/
void
nxge_reclaim_rings(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;
	p_tx_ring_t tx_ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: no channel "
		    "allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL rings pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_reclaim_rings: channel %d",
		    channel));
		tx_ring_p = tx_rings->rings[index];
		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel);
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
}

void
nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
{
	int index, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_txdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_txdma_dump_fzc_regs(handle);

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: NULL ring"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: "
		    "no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: NULL rings"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		(void) npi_txdma_dump_tdc_regs(handle, channel);
	}

	/* Dump TXC registers */
	(void) npi_txc_dump_fzc_regs(handle);
	(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		(void) npi_txc_dump_tdc_fzc_regs(handle, channel);
	}

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_regs_dump_channels: channel %d",
		    channel));
		nxge_txdma_regs_dump(nxgep, channel);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));

}

void
nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tx_ring_hdl_t hdl;
	tx_ring_kick_t kick;
	tx_cs_t cs;
	txc_control_t control;
	uint32_t bitmap = 0;
	uint32_t burst = 0;
	uint32_t bytes = 0;
	dma_log_page_t cfg;

	printf("\n\tfunc # %d tdc %d ",
	    nxgep->function_num, channel);
	cfg.page_num = 0;
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 0 %d",
	    cfg.func_num, cfg.valid);
	cfg.page_num = 1;
	(void) npi_txdma_log_page_get(handle, channel, &cfg);
	printf("\n\tlog page func %d valid page 1 %d",
	    cfg.func_num, cfg.valid);

	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
	printf("\n\thead value is 0x%0llx",
	    (long long)hdl.value);
	printf("\n\thead index %d", hdl.bits.ldw.head);
	printf("\n\tkick value is 0x%0llx",
	    (long long)kick.value);
	printf("\n\ttail index %d\n", kick.bits.ldw.tail);

	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);

	(void) npi_txc_control(handle, OP_GET, &control);
	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);

	printf("\n\tTXC port control 0x%0llx",
	    (long long)control.value);
	printf("\n\tTXC port bitmap 0x%x", bitmap);
	printf("\n\tTXC max burst %d", burst);
	printf("\n\tTXC bytes xmt %d\n", bytes);

	{
		ipp_status_t status;

		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
#if defined(__i386)
		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
#else
		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
#endif
	}
}

/*
 * Static functions start here.
 */
static nxge_status_t
nxge_map_txdma(p_nxge_t nxgep)
{
	int i, ndmas;
	uint16_t channel;
	p_tx_rings_t tx_rings;
	p_tx_ring_t *tx_desc_rings;
	p_tx_mbox_areas_t tx_mbox_areas_p;
	p_tx_mbox_t *tx_mbox_p;
	p_nxge_dma_pool_t dma_buf_poolp;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_common_t *dma_cntl_p;
	nxge_status_t status = NXGE_OK;
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	p_nxge_dma_common_t t_dma_buf_p;
	p_nxge_dma_common_t t_dma_cntl_p;
#endif

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));

	dma_buf_poolp = nxgep->tx_buf_pool_p;
	dma_cntl_poolp = nxgep->tx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_map_txdma: buf not allocated"));
		return (NXGE_ERROR);
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_map_txdma: no dma allocated"));
		return (NXGE_ERROR);
	}

	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;

	tx_rings = (p_tx_rings_t)
	    KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
	    sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p",
	    tx_rings, tx_desc_rings));

	tx_mbox_areas_p = (p_tx_mbox_areas_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
	    sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */
	for (i = 0; i < ndmas; i++) {
		/*
		 * Set up and prepare buffer blocks, descriptors
		 * and mailbox.
		 */
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		status = nxge_map_txdma_channel(nxgep, channel,
		    (p_nxge_dma_common_t *)&dma_buf_p[i],
		    (p_tx_ring_t *)&tx_desc_rings[i],
		    dma_buf_poolp->num_chunks[i],
		    (p_nxge_dma_common_t *)&dma_cntl_p[i],
		    (p_tx_mbox_t *)&tx_mbox_p[i]);
		if (status != NXGE_OK) {
			goto nxge_map_txdma_fail1;
		}
		tx_desc_rings[i]->index = (uint16_t)i;
		tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i];

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
			tx_desc_rings[i]->hv_set = B_FALSE;
			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
			t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i];

			tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp =
			    (uint64_t)t_dma_buf_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_buf_ioaddr_size =
			    (uint64_t)t_dma_buf_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_map_txdma_channel: "
			    "hv data buf base io $%p "
			    "size 0x%llx (%d) "
			    "buf base io $%p "
			    "orig vatopa base io $%p "
			    "orig_len 0x%llx (%d)",
			    tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp,
			    tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
			    tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
			    t_dma_buf_p->ioaddr_pp,
			    t_dma_buf_p->orig_vatopa,
			    t_dma_buf_p->orig_alength,
			    t_dma_buf_p->orig_alength));

			tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp =
			    (uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_cntl_ioaddr_size =
			    (uint64_t)t_dma_cntl_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_map_txdma_channel: "
			    "hv cntl base io $%p "
			    "orig ioaddr_pp ($%p) "
			    "orig vatopa ($%p) "
			    "size 0x%llx (%d 0x%x)",
			    tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp,
			    t_dma_cntl_p->orig_ioaddr_pp,
			    t_dma_cntl_p->orig_vatopa,
			    tx_desc_rings[i]->hv_tx_cntl_ioaddr_size,
			    t_dma_cntl_p->orig_alength,
			    t_dma_cntl_p->orig_alength));
		}
#endif
	}

	tx_rings->ndmas = ndmas;
	tx_rings->rings = tx_desc_rings;
	nxgep->tx_rings = tx_rings;
	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
	nxgep->tx_mbox_areas_p = tx_mbox_areas_p;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_map_txdma_exit;

nxge_map_txdma_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma: uninit tx desc "
	    "(status 0x%x channel %d i %d)",
	    nxgep, status, channel, i));
	i--;
	for (; i >= 0; i--) {
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		nxge_unmap_txdma_channel(nxgep, channel,
		    tx_desc_rings[i],
		    tx_mbox_p[i]);
	}

	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

nxge_map_txdma_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma: "
	    "(status 0x%x channel %d)",
	    status, channel));

	return (status);
}

static void
nxge_unmap_txdma(p_nxge_t nxgep)
{
	int i, ndmas;
	uint8_t channel;
	p_tx_rings_t tx_rings;
p_tx_ring_t *tx_desc_rings; 1982 p_tx_mbox_areas_t tx_mbox_areas_p; 1983 p_tx_mbox_t *tx_mbox_p; 1984 p_nxge_dma_pool_t dma_buf_poolp; 1985 1986 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma")); 1987 1988 dma_buf_poolp = nxgep->tx_buf_pool_p; 1989 if (!dma_buf_poolp->buf_allocated) { 1990 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1991 "==> nxge_unmap_txdma: buf not allocated")); 1992 return; 1993 } 1994 1995 ndmas = dma_buf_poolp->ndmas; 1996 if (!ndmas) { 1997 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1998 "<== nxge_unmap_txdma: no dma allocated")); 1999 return; 2000 } 2001 2002 tx_rings = nxgep->tx_rings; 2003 2004 if (tx_rings == NULL) { 2005 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2006 "<== nxge_unmap_txdma: NULL ring pointer")); 2007 return; 2008 } 2009 2010 tx_desc_rings = tx_rings->rings; 2011 if (tx_desc_rings == NULL) { 2012 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2013 "<== nxge_unmap_txdma: NULL ring pointers")); 2014 return; 2015 } 2016 2017 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: " 2018 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2019 tx_rings, tx_desc_rings, ndmas)); 2020 2021 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2022 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2023 2024 for (i = 0; i < ndmas; i++) { 2025 channel = tx_desc_rings[i]->tdc; 2026 (void) nxge_unmap_txdma_channel(nxgep, channel, 2027 (p_tx_ring_t)tx_desc_rings[i], 2028 (p_tx_mbox_t)tx_mbox_p[i]); 2029 } 2030 2031 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 2032 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 2033 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 2034 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 2035 2036 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2037 "<== nxge_unmap_txdma")); 2038 } 2039 2040 static nxge_status_t 2041 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2042 p_nxge_dma_common_t *dma_buf_p, 2043 p_tx_ring_t *tx_desc_p, 2044 uint32_t num_chunks, 2045 p_nxge_dma_common_t *dma_cntl_p, 2046 p_tx_mbox_t *tx_mbox_p) 2047 { 2048 nxge_status_t status = NXGE_OK; 2049 2050 /* 2051 * Set up and prepare buffer blocks, descriptors 2052 * and mailbox. 2053 */ 2054 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2055 "==> nxge_map_txdma_channel (channel %d)", channel)); 2056 /* 2057 * Transmit buffer blocks 2058 */ 2059 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2060 dma_buf_p, tx_desc_p, num_chunks); 2061 if (status != NXGE_OK) { 2062 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2063 "==> nxge_map_txdma_channel (channel %d): " 2064 "map buffer failed 0x%x", channel, status)); 2065 goto nxge_map_txdma_channel_exit; 2066 } 2067 2068 /* 2069 * Transmit block ring, and mailbox. 2070 */ 2071 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2072 tx_mbox_p); 2073 2074 goto nxge_map_txdma_channel_exit; 2075 2076 nxge_map_txdma_channel_fail1: 2077 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2078 "==> nxge_map_txdma_channel: unmap buf " 2079 "(status 0x%x channel %d)", 2080 status, channel)); 2081 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2082 2083 nxge_map_txdma_channel_exit: 2084 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2085 "<== nxge_map_txdma_channel: " 2086 "(status 0x%x channel %d)", 2087 status, channel)); 2088 2089 return (status); 2090 } 2091 2092 /*ARGSUSED*/ 2093 static void 2094 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2095 p_tx_ring_t tx_ring_p, 2096 p_tx_mbox_t tx_mbox_p) 2097 { 2098 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2099 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2100 /* 2101 * Unmap tx block ring and mailbox.
2102 */ 2103 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, 2104 tx_ring_p, tx_mbox_p); 2105 2106 /* unmap buffer blocks */ 2107 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p); 2108 2109 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2110 } 2111 2112 /*ARGSUSED*/ 2113 static void 2114 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2115 p_nxge_dma_common_t *dma_cntl_p, 2116 p_tx_ring_t tx_ring_p, 2117 p_tx_mbox_t *tx_mbox_p) 2118 { 2119 p_tx_mbox_t mboxp; 2120 p_nxge_dma_common_t cntl_dmap; 2121 p_nxge_dma_common_t dmap; 2122 p_tx_rng_cfig_t tx_ring_cfig_p; 2123 p_tx_ring_kick_t tx_ring_kick_p; 2124 p_tx_cs_t tx_cs_p; 2125 p_tx_dma_ent_msk_t tx_evmask_p; 2126 p_txdma_mbh_t mboxh_p; 2127 p_txdma_mbl_t mboxl_p; 2128 uint64_t tx_desc_len; 2129 2130 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2131 "==> nxge_map_txdma_channel_cfg_ring")); 2132 2133 cntl_dmap = *dma_cntl_p; 2134 2135 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2136 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2137 sizeof (tx_desc_t)); 2138 /* 2139 * Zero out transmit ring descriptors. 2140 */ 2141 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2142 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2143 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2144 tx_cs_p = &(tx_ring_p->tx_cs); 2145 tx_evmask_p = &(tx_ring_p->tx_evmask); 2146 tx_ring_cfig_p->value = 0; 2147 tx_ring_kick_p->value = 0; 2148 tx_cs_p->value = 0; 2149 tx_evmask_p->value = 0; 2150 2151 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2152 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2153 dma_channel, 2154 dmap->dma_cookie.dmac_laddress)); 2155 2156 tx_ring_cfig_p->value = 0; 2157 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2158 tx_ring_cfig_p->value = 2159 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2160 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2161 2162 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2163 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2164 dma_channel, 2165 tx_ring_cfig_p->value)); 2166 2167 tx_cs_p->bits.ldw.rst = 1; 2168 2169 /* Map in mailbox */ 2170 mboxp = (p_tx_mbox_t) 2171 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2172 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2173 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2174 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2175 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2176 mboxh_p->value = mboxl_p->value = 0; 2177 2178 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2179 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2180 dmap->dma_cookie.dmac_laddress)); 2181 2182 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2183 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2184 2185 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2186 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2187 2188 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2189 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2190 dmap->dma_cookie.dmac_laddress)); 2191 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2192 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2193 "mbox $%p", 2194 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2195 tx_ring_p->page_valid.value = 0; 2196 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2197 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2198 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2199 tx_ring_p->page_hdl.value = 0; 2200 2201 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2202 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2203 2204 
tx_ring_p->max_burst.value = 0; 2205 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2206 2207 *tx_mbox_p = mboxp; 2208 2209 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2210 "<== nxge_map_txdma_channel_cfg_ring")); 2211 } 2212 2213 /*ARGSUSED*/ 2214 static void 2215 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2216 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2217 { 2218 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2219 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2220 tx_ring_p->tdc)); 2221 2222 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2223 2224 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2225 "<== nxge_unmap_txdma_channel_cfg_ring")); 2226 } 2227 2228 static nxge_status_t 2229 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2230 p_nxge_dma_common_t *dma_buf_p, 2231 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2232 { 2233 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2234 p_nxge_dma_common_t dmap; 2235 nxge_os_dma_handle_t tx_buf_dma_handle; 2236 p_tx_ring_t tx_ring_p; 2237 p_tx_msg_t tx_msg_ring; 2238 nxge_status_t status = NXGE_OK; 2239 int ddi_status = DDI_SUCCESS; 2240 int i, j, index; 2241 uint32_t size, bsize; 2242 uint32_t nblocks, nmsgs; 2243 2244 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2245 "==> nxge_map_txdma_channel_buf_ring")); 2246 2247 dma_bufp = tmp_bufp = *dma_buf_p; 2248 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2249 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2250 "chunks bufp $%p", 2251 channel, num_chunks, dma_bufp)); 2252 2253 nmsgs = 0; 2254 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2255 nmsgs += tmp_bufp->nblocks; 2256 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2257 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2258 "bufp $%p nblocks %d nmsgs %d", 2259 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2260 } 2261 if (!nmsgs) { 2262 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2263 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2264 "no msg blocks", 2265 channel)); 2266 status = NXGE_ERROR; 2267 goto nxge_map_txdma_channel_buf_ring_exit; 2268 } 2269 2270 tx_ring_p = (p_tx_ring_t) 2271 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2272 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2273 (void *)nxgep->interrupt_cookie); 2274 2275 tx_ring_p->nxgep = nxgep; 2276 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2277 nxge_serial_tx, tx_ring_p); 2278 /* 2279 * Allocate transmit message rings and handles for packets 2280 * not to be copied to premapped buffers. 
2281 */ 2282 size = nmsgs * sizeof (tx_msg_t); 2283 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2284 for (i = 0; i < nmsgs; i++) { 2285 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2286 DDI_DMA_DONTWAIT, 0, 2287 &tx_msg_ring[i].dma_handle); 2288 if (ddi_status != DDI_SUCCESS) { 2289 status |= NXGE_DDI_FAILED; 2290 break; 2291 } 2292 } 2293 if (i < nmsgs) { 2294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2295 "Allocate handles failed.")); 2296 goto nxge_map_txdma_channel_buf_ring_fail1; 2297 } 2298 2299 tx_ring_p->tdc = channel; 2300 tx_ring_p->tx_msg_ring = tx_msg_ring; 2301 tx_ring_p->tx_ring_size = nmsgs; 2302 tx_ring_p->num_chunks = num_chunks; 2303 if (!nxge_tx_intr_thres) { 2304 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2305 } 2306 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2307 tx_ring_p->rd_index = 0; 2308 tx_ring_p->wr_index = 0; 2309 tx_ring_p->ring_head.value = 0; 2310 tx_ring_p->ring_kick_tail.value = 0; 2311 tx_ring_p->descs_pending = 0; 2312 2313 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2314 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2315 "actual tx desc max %d nmsgs %d " 2316 "(config nxge_tx_ring_size %d)", 2317 channel, tx_ring_p->tx_ring_size, nmsgs, 2318 nxge_tx_ring_size)); 2319 2320 /* 2321 * Map in buffers from the buffer pool. 2322 */ 2323 index = 0; 2324 bsize = dma_bufp->block_size; 2325 2326 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2327 "dma_bufp $%p tx_rng_p $%p " 2328 "tx_msg_rng_p $%p bsize %d", 2329 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2330 2331 tx_buf_dma_handle = dma_bufp->dma_handle; 2332 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2333 bsize = dma_bufp->block_size; 2334 nblocks = dma_bufp->nblocks; 2335 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2336 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2337 "size %d dma_bufp $%p", 2338 i, sizeof (nxge_dma_common_t), dma_bufp)); 2339 2340 for (j = 0; j < nblocks; j++) { 2341 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2342 dmap = &tx_msg_ring[index++].buf_dma; 2343 #ifdef TX_MEM_DEBUG 2344 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2345 "==> nxge_map_txdma_channel_buf_ring: j %d" 2346 "dmap $%p", i, dmap)); 2347 #endif 2348 nxge_setup_dma_common(dmap, dma_bufp, 1, 2349 bsize); 2350 } 2351 } 2352 2353 if (i < num_chunks) { 2354 status = NXGE_ERROR; 2355 goto nxge_map_txdma_channel_buf_ring_fail1; 2356 } 2357 2358 *tx_desc_p = tx_ring_p; 2359 2360 goto nxge_map_txdma_channel_buf_ring_exit; 2361 2362 nxge_map_txdma_channel_buf_ring_fail1: 2363 if (tx_ring_p->serial) { 2364 nxge_serialize_destroy(tx_ring_p->serial); 2365 tx_ring_p->serial = NULL; 2366 } 2367 2368 index--; 2369 for (; index >= 0; index--) { 2370 if (tx_msg_ring[index].dma_handle != NULL) { 2371 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2372 } 2373 } 2374 MUTEX_DESTROY(&tx_ring_p->lock); 2375 KMEM_FREE(tx_msg_ring, size); 2376 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2377 2378 status = NXGE_ERROR; 2379 2380 nxge_map_txdma_channel_buf_ring_exit: 2381 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2382 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2383 2384 return (status); 2385 } 2386 2387 /*ARGSUSED*/ 2388 static void 2389 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2390 { 2391 p_tx_msg_t tx_msg_ring; 2392 p_tx_msg_t tx_msg_p; 2393 int i; 2394 2395 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2396 "==> nxge_unmap_txdma_channel_buf_ring")); 2397 if (tx_ring_p == NULL) { 2398 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2399 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2400 return; 2401 } 2402 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2403 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2404 tx_ring_p->tdc)); 2405 2406 tx_msg_ring = tx_ring_p->tx_msg_ring; 2407 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2408 tx_msg_p = &tx_msg_ring[i]; 2409 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2410 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2411 "entry = %d", 2412 i)); 2413 (void) dvma_unload(tx_msg_p->dvma_handle, 2414 0, -1); 2415 tx_msg_p->dvma_handle = NULL; 2416 if (tx_ring_p->dvma_wr_index == 2417 tx_ring_p->dvma_wrap_mask) { 2418 tx_ring_p->dvma_wr_index = 0; 2419 } else { 2420 tx_ring_p->dvma_wr_index++; 2421 } 2422 tx_ring_p->dvma_pending--; 2423 } else if (tx_msg_p->flags.dma_type == 2424 USE_DMA) { 2425 if (ddi_dma_unbind_handle 2426 (tx_msg_p->dma_handle)) { 2427 cmn_err(CE_WARN, "!nxge_unmap_tx_bug_ring: " 2428 "ddi_dma_unbind_handle " 2429 "failed."); 2430 } 2431 } 2432 2433 if (tx_msg_p->tx_message != NULL) { 2434 freemsg(tx_msg_p->tx_message); 2435 tx_msg_p->tx_message = NULL; 2436 } 2437 } 2438 2439 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2440 if (tx_msg_ring[i].dma_handle != NULL) { 2441 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2442 } 2443 } 2444 2445 if (tx_ring_p->serial) { 2446 nxge_serialize_destroy(tx_ring_p->serial); 2447 tx_ring_p->serial = NULL; 2448 } 2449 2450 MUTEX_DESTROY(&tx_ring_p->lock); 2451 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2452 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2453 2454 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2455 "<== nxge_unmap_txdma_channel_buf_ring")); 2456 } 2457 2458 static nxge_status_t 2459 nxge_txdma_hw_start(p_nxge_t nxgep) 2460 { 2461 int i, ndmas; 2462 uint16_t channel; 2463 p_tx_rings_t tx_rings; 2464 p_tx_ring_t *tx_desc_rings; 2465 p_tx_mbox_areas_t tx_mbox_areas_p; 2466 p_tx_mbox_t *tx_mbox_p; 2467 nxge_status_t status = NXGE_OK; 2468 2469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2470 2471 tx_rings = nxgep->tx_rings; 2472 if (tx_rings == NULL) { 2473 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2474 "<== nxge_txdma_hw_start: NULL ring pointer")); 2475 return (NXGE_ERROR); 2476 } 2477 tx_desc_rings = tx_rings->rings; 2478 if (tx_desc_rings == NULL) { 2479 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2480 "<== nxge_txdma_hw_start: NULL ring pointers")); 2481 return (NXGE_ERROR); 2482 } 2483 2484 ndmas = tx_rings->ndmas; 2485 if (!ndmas) { 2486 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2487 "<== nxge_txdma_hw_start: no dma channel allocated")); 2488 return (NXGE_ERROR); 2489 } 2490 2491 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2492 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2493 tx_rings, tx_desc_rings, ndmas)); 2494 2495 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2496 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2497 2498 for (i = 0; i < ndmas; i++) { 2499 channel = tx_desc_rings[i]->tdc, 2500 status = nxge_txdma_start_channel(nxgep, channel, 2501 (p_tx_ring_t)tx_desc_rings[i], 2502 (p_tx_mbox_t)tx_mbox_p[i]); 2503 if (status != NXGE_OK) { 2504 goto nxge_txdma_hw_start_fail1; 2505 } 2506 } 2507 2508 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2509 "tx_rings $%p rings $%p", 2510 nxgep->tx_rings, nxgep->tx_rings->rings)); 2511 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2512 "tx_rings $%p tx_desc_rings $%p", 2513 nxgep->tx_rings, tx_desc_rings)); 2514 2515 goto nxge_txdma_hw_start_exit; 2516 2517 nxge_txdma_hw_start_fail1: 2518 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2519 "==> nxge_txdma_hw_start: disable " 2520 "(status 0x%x channel %d 
i %d)", status, channel, i)); 2521 for (; i >= 0; i--) { 2522 channel = tx_desc_rings[i]->tdc, 2523 (void) nxge_txdma_stop_channel(nxgep, channel, 2524 (p_tx_ring_t)tx_desc_rings[i], 2525 (p_tx_mbox_t)tx_mbox_p[i]); 2526 } 2527 2528 nxge_txdma_hw_start_exit: 2529 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2530 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2531 2532 return (status); 2533 } 2534 2535 static void 2536 nxge_txdma_hw_stop(p_nxge_t nxgep) 2537 { 2538 int i, ndmas; 2539 uint16_t channel; 2540 p_tx_rings_t tx_rings; 2541 p_tx_ring_t *tx_desc_rings; 2542 p_tx_mbox_areas_t tx_mbox_areas_p; 2543 p_tx_mbox_t *tx_mbox_p; 2544 2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop")); 2546 2547 tx_rings = nxgep->tx_rings; 2548 if (tx_rings == NULL) { 2549 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2550 "<== nxge_txdma_hw_stop: NULL ring pointer")); 2551 return; 2552 } 2553 tx_desc_rings = tx_rings->rings; 2554 if (tx_desc_rings == NULL) { 2555 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2556 "<== nxge_txdma_hw_stop: NULL ring pointers")); 2557 return; 2558 } 2559 2560 ndmas = tx_rings->ndmas; 2561 if (!ndmas) { 2562 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2563 "<== nxge_txdma_hw_stop: no dma channel allocated")); 2564 return; 2565 } 2566 2567 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2568 "tx_rings $%p tx_desc_rings $%p", 2569 tx_rings, tx_desc_rings)); 2570 2571 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2572 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2573 2574 for (i = 0; i < ndmas; i++) { 2575 channel = tx_desc_rings[i]->tdc; 2576 (void) nxge_txdma_stop_channel(nxgep, channel, 2577 (p_tx_ring_t)tx_desc_rings[i], 2578 (p_tx_mbox_t)tx_mbox_p[i]); 2579 } 2580 2581 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2582 "tx_rings $%p tx_desc_rings $%p", 2583 tx_rings, tx_desc_rings)); 2584 2585 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop")); 2586 } 2587 2588 static nxge_status_t 2589 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2590 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2591 2592 { 2593 nxge_status_t status = NXGE_OK; 2594 2595 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2596 "==> nxge_txdma_start_channel (channel %d)", channel)); 2597 /* 2598 * TXDMA/TXC must be in stopped state. 2599 */ 2600 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2601 2602 /* 2603 * Reset TXDMA channel 2604 */ 2605 tx_ring_p->tx_cs.value = 0; 2606 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2607 status = nxge_reset_txdma_channel(nxgep, channel, 2608 tx_ring_p->tx_cs.value); 2609 if (status != NXGE_OK) { 2610 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2611 "==> nxge_txdma_start_channel (channel %d)" 2612 " reset channel failed 0x%x", channel, status)); 2613 goto nxge_txdma_start_channel_exit; 2614 } 2615 2616 /* 2617 * Initialize the TXDMA channel specific FZC control 2618 * configurations. These FZC registers are pertaining 2619 * to each TX channel (i.e. logical pages). 2620 */ 2621 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2622 tx_ring_p, tx_mbox_p); 2623 if (status != NXGE_OK) { 2624 goto nxge_txdma_start_channel_exit; 2625 } 2626 2627 /* 2628 * Initialize the event masks. 2629 */ 2630 tx_ring_p->tx_evmask.value = 0; 2631 status = nxge_init_txdma_channel_event_mask(nxgep, 2632 channel, &tx_ring_p->tx_evmask); 2633 if (status != NXGE_OK) { 2634 goto nxge_txdma_start_channel_exit; 2635 } 2636 2637 /* 2638 * Load TXDMA descriptors, buffers, mailbox, 2639 * initialise the DMA channels and 2640 * enable each DMA channel. 
2641 */ 2642 status = nxge_enable_txdma_channel(nxgep, channel, 2643 tx_ring_p, tx_mbox_p); 2644 if (status != NXGE_OK) { 2645 goto nxge_txdma_start_channel_exit; 2646 } 2647 2648 nxge_txdma_start_channel_exit: 2649 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2650 2651 return (status); 2652 } 2653 2654 /*ARGSUSED*/ 2655 static nxge_status_t 2656 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel, 2657 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2658 { 2659 nxge_status_t status = NXGE_OK; 2660 2661 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2662 "==> nxge_txdma_stop_channel: channel %d", channel)); 2663 2664 /* 2665 * Stop (disable) TXDMA and TXC. Note: if the STOP bit is set 2666 * but the STOP_N_GO bit is not, the TXDMA reset state will not 2667 * be set when the channel is subsequently reset. 2668 */ 2669 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2670 2671 /* 2672 * Reset TXDMA channel 2673 */ 2674 tx_ring_p->tx_cs.value = 0; 2675 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2676 status = nxge_reset_txdma_channel(nxgep, channel, 2677 tx_ring_p->tx_cs.value); 2678 if (status != NXGE_OK) { 2679 goto nxge_txdma_stop_channel_exit; 2680 } 2681 2682 #ifdef HARDWARE_REQUIRED 2683 /* Set up the interrupt event masks. */ 2684 tx_ring_p->tx_evmask.value = 0; 2685 status = nxge_init_txdma_channel_event_mask(nxgep, 2686 channel, &tx_ring_p->tx_evmask); 2687 if (status != NXGE_OK) { 2688 goto nxge_txdma_stop_channel_exit; 2689 } 2690 2691 /* Initialize the DMA control and status register */ 2692 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2693 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2694 tx_ring_p->tx_cs.value); 2695 if (status != NXGE_OK) { 2696 goto nxge_txdma_stop_channel_exit; 2697 } 2698 2699 /* Disable channel */ 2700 status = nxge_disable_txdma_channel(nxgep, channel, 2701 tx_ring_p, tx_mbox_p); 2702 if (status != NXGE_OK) { 2703 goto nxge_txdma_stop_channel_exit; 2704 } 2705 2706 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2707 "==> nxge_txdma_stop_channel: event done")); 2708 2709 #endif 2710 2711 nxge_txdma_stop_channel_exit: 2712 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2713 return (status); 2714 } 2715 2716 static p_tx_ring_t 2717 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2718 { 2719 int index, ndmas; 2720 uint16_t tdc; 2721 p_tx_rings_t tx_rings; 2722 2723 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2724 2725 tx_rings = nxgep->tx_rings; 2726 if (tx_rings == NULL) { 2727 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2728 "<== nxge_txdma_get_ring: NULL ring pointer")); 2729 return (NULL); 2730 } 2731 2732 ndmas = tx_rings->ndmas; 2733 if (!ndmas) { 2734 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2735 "<== nxge_txdma_get_ring: no channel allocated")); 2736 return (NULL); 2737 } 2738 2739 if (tx_rings->rings == NULL) { 2740 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2741 "<== nxge_txdma_get_ring: NULL rings pointer")); 2742 return (NULL); 2743 } 2744 2745 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: " 2746 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2747 tx_rings, tx_rings->rings, ndmas)); 2748 2749 for (index = 0; index < ndmas; index++) { 2750 tdc = tx_rings->rings[index]->tdc; 2751 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2752 "==> nxge_txdma_get_ring: channel %d", tdc)); 2753 if (channel == tdc) { 2754 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2755 "<== nxge_txdma_get_ring: tdc %d " 2756 "ring $%p", 2757 tdc, tx_rings->rings[index])); 2758 return (p_tx_ring_t)(tx_rings->rings[index]); 2759 } 2760 } 2761 2762 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring")); 2763 return (NULL); 2764 } 2765 2766 
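/*
 * Descriptive note (added): nxge_txdma_get_mbox() returns the transmit
 * mailbox that belongs to the given TDC channel. It scans
 * tx_rings->rings[] for the entry whose tdc matches the requested
 * channel and returns the mailbox at the same index in
 * txmbox_areas_p, or NULL if the rings or mailbox areas have not been
 * set up.
 */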
static p_tx_mbox_t 2767 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 2768 { 2769 int index, tdc, ndmas; 2770 p_tx_rings_t tx_rings; 2771 p_tx_mbox_areas_t tx_mbox_areas_p; 2772 p_tx_mbox_t *tx_mbox_p; 2773 2774 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 2775 2776 tx_rings = nxgep->tx_rings; 2777 if (tx_rings == NULL) { 2778 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2779 "<== nxge_txdma_get_mbox: NULL ring pointer")); 2780 return (NULL); 2781 } 2782 2783 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2784 if (tx_mbox_areas_p == NULL) { 2785 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2786 "<== nxge_txdma_get_mbox: NULL mbox pointer")); 2787 return (NULL); 2788 } 2789 2790 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2791 2792 ndmas = tx_rings->ndmas; 2793 if (!ndmas) { 2794 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2795 "<== nxge_txdma_get_mbox: no channel allocated")); 2796 return (NULL); 2797 } 2798 2799 if (tx_rings->rings == NULL) { 2800 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2801 "<== nxge_txdma_get_mbox: NULL rings pointer")); 2802 return (NULL); 2803 } 2804 2805 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: " 2806 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2807 tx_rings, tx_rings, ndmas)); 2808 2809 for (index = 0; index < ndmas; index++) { 2810 tdc = tx_rings->rings[index]->tdc; 2811 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2812 "==> nxge_txdma_get_mbox: channel %d", tdc)); 2813 if (channel == tdc) { 2814 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2815 "<== nxge_txdma_get_mbox: tdc %d " 2816 "ring $%p", 2817 tdc, tx_rings->rings[index])); 2818 return (p_tx_mbox_t)(tx_mbox_p[index]); 2819 } 2820 } 2821 2822 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox")); 2823 return (NULL); 2824 } 2825 2826 /*ARGSUSED*/ 2827 static nxge_status_t 2828 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 2829 { 2830 npi_handle_t handle; 2831 npi_status_t rs; 2832 uint8_t channel; 2833 p_tx_ring_t *tx_rings; 2834 p_tx_ring_t tx_ring_p; 2835 p_nxge_tx_ring_stats_t tdc_stats; 2836 boolean_t txchan_fatal = B_FALSE; 2837 nxge_status_t status = NXGE_OK; 2838 tdmc_inj_par_err_t par_err; 2839 uint32_t value; 2840 2841 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts")); 2842 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2843 channel = ldvp->channel; 2844 2845 tx_rings = nxgep->tx_rings->rings; 2846 tx_ring_p = tx_rings[index]; 2847 tdc_stats = tx_ring_p->tdc_stats; 2848 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 2849 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 2850 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 2851 if ((rs = npi_txdma_ring_error_get(handle, channel, 2852 &tdc_stats->errlog)) != NPI_SUCCESS) 2853 return (NXGE_ERROR | rs); 2854 } 2855 2856 if (cs.bits.ldw.mbox_err) { 2857 tdc_stats->mbox_err++; 2858 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2859 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 2860 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2861 "==> nxge_tx_err_evnts(channel %d): " 2862 "fatal error: mailbox", channel)); 2863 txchan_fatal = B_TRUE; 2864 } 2865 if (cs.bits.ldw.pkt_size_err) { 2866 tdc_stats->pkt_size_err++; 2867 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2868 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2869 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2870 "==> nxge_tx_err_evnts(channel %d): " 2871 "fatal error: pkt_size_err", channel)); 2872 txchan_fatal = B_TRUE; 2873 } 2874 if (cs.bits.ldw.tx_ring_oflow) { 2875 tdc_stats->tx_ring_oflow++; 2876 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2877 
NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 2878 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2879 "==> nxge_tx_err_evnts(channel %d): " 2880 "fatal error: tx_ring_oflow", channel)); 2881 txchan_fatal = B_TRUE; 2882 } 2883 if (cs.bits.ldw.pref_buf_par_err) { 2884 tdc_stats->pre_buf_par_err++; 2885 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2886 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 2887 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2888 "==> nxge_tx_err_evnts(channel %d): " 2889 "fatal error: pre_buf_par_err", channel)); 2890 /* Clear error injection source for parity error */ 2891 (void) npi_txdma_inj_par_error_get(handle, &value); 2892 par_err.value = value; 2893 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 2894 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 2895 txchan_fatal = B_TRUE; 2896 } 2897 if (cs.bits.ldw.nack_pref) { 2898 tdc_stats->nack_pref++; 2899 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2900 NXGE_FM_EREPORT_TDMC_NACK_PREF); 2901 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2902 "==> nxge_tx_err_evnts(channel %d): " 2903 "fatal error: nack_pref", channel)); 2904 txchan_fatal = B_TRUE; 2905 } 2906 if (cs.bits.ldw.nack_pkt_rd) { 2907 tdc_stats->nack_pkt_rd++; 2908 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2909 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 2910 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2911 "==> nxge_tx_err_evnts(channel %d): " 2912 "fatal error: nack_pkt_rd", channel)); 2913 txchan_fatal = B_TRUE; 2914 } 2915 if (cs.bits.ldw.conf_part_err) { 2916 tdc_stats->conf_part_err++; 2917 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2918 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 2919 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2920 "==> nxge_tx_err_evnts(channel %d): " 2921 "fatal error: config_partition_err", channel)); 2922 txchan_fatal = B_TRUE; 2923 } 2924 if (cs.bits.ldw.pkt_prt_err) { 2925 tdc_stats->pkt_part_err++; 2926 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2927 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 2928 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2929 "==> nxge_tx_err_evnts(channel %d): " 2930 "fatal error: pkt_prt_err", channel)); 2931 txchan_fatal = B_TRUE; 2932 } 2933 2934 /* Clear error injection source in case this is an injected error */ 2935 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 2936 2937 if (txchan_fatal) { 2938 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2939 " nxge_tx_err_evnts: " 2940 " fatal error on channel %d cs 0x%llx\n", 2941 channel, cs.value)); 2942 status = nxge_txdma_fatal_err_recover(nxgep, channel, 2943 tx_ring_p); 2944 if (status == NXGE_OK) { 2945 FM_SERVICE_RESTORED(nxgep); 2946 } 2947 } 2948 2949 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts")); 2950 2951 return (status); 2952 } 2953 2954 static nxge_status_t 2955 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel, 2956 p_tx_ring_t tx_ring_p) 2957 { 2958 npi_handle_t handle; 2959 npi_status_t rs = NPI_SUCCESS; 2960 p_tx_mbox_t tx_mbox_p; 2961 nxge_status_t status = NXGE_OK; 2962 2963 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 2964 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2965 "Recovering from TxDMAChannel#%d error...", channel)); 2966 2967 /* 2968 * Stop the dma channel waits for the stop done. 2969 * If the stop done bit is not set, then create 2970 * an error. 
2971 */ 2972 2973 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2974 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 2975 MUTEX_ENTER(&tx_ring_p->lock); 2976 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 2977 if (rs != NPI_SUCCESS) { 2978 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2979 "==> nxge_txdma_fatal_err_recover (channel %d): " 2980 "stop failed ", channel)); 2981 goto fail; 2982 } 2983 2984 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 2985 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2986 2987 /* 2988 * Reset TXDMA channel 2989 */ 2990 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 2991 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2992 NPI_SUCCESS) { 2993 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2994 "==> nxge_txdma_fatal_err_recover (channel %d)" 2995 " reset channel failed 0x%x", channel, rs)); 2996 goto fail; 2997 } 2998 2999 /* 3000 * Reset the tail (kick) register to 0. 3001 * (Hardware will not reset it. Tx overflow fatal 3002 * error if tail is not set to 0 after reset! 3003 */ 3004 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3005 3006 /* Restart TXDMA channel */ 3007 3008 /* 3009 * Initialize the TXDMA channel specific FZC control 3010 * configurations. These FZC registers are pertaining 3011 * to each TX channel (i.e. logical pages). 3012 */ 3013 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3014 3015 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3016 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3017 tx_ring_p, tx_mbox_p); 3018 if (status != NXGE_OK) 3019 goto fail; 3020 3021 /* 3022 * Initialize the event masks. 3023 */ 3024 tx_ring_p->tx_evmask.value = 0; 3025 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3026 &tx_ring_p->tx_evmask); 3027 if (status != NXGE_OK) 3028 goto fail; 3029 3030 tx_ring_p->wr_index_wrap = B_FALSE; 3031 tx_ring_p->wr_index = 0; 3032 tx_ring_p->rd_index = 0; 3033 3034 /* 3035 * Load TXDMA descriptors, buffers, mailbox, 3036 * initialise the DMA channels and 3037 * enable each DMA channel. 3038 */ 3039 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3040 status = nxge_enable_txdma_channel(nxgep, channel, 3041 tx_ring_p, tx_mbox_p); 3042 MUTEX_EXIT(&tx_ring_p->lock); 3043 if (status != NXGE_OK) 3044 goto fail; 3045 3046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3047 "Recovery Successful, TxDMAChannel#%d Restored", 3048 channel)); 3049 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3050 3051 return (NXGE_OK); 3052 3053 fail: 3054 MUTEX_EXIT(&tx_ring_p->lock); 3055 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3056 "nxge_txdma_fatal_err_recover (channel %d): " 3057 "failed to recover this txdma channel", channel)); 3058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3059 3060 return (status); 3061 } 3062 3063 nxge_status_t 3064 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3065 { 3066 npi_handle_t handle; 3067 npi_status_t rs = NPI_SUCCESS; 3068 nxge_status_t status = NXGE_OK; 3069 p_tx_ring_t *tx_desc_rings; 3070 p_tx_rings_t tx_rings; 3071 p_tx_ring_t tx_ring_p; 3072 p_tx_mbox_t tx_mbox_p; 3073 int i, ndmas; 3074 uint16_t channel; 3075 3076 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3078 "Recovering from TxPort error...")); 3079 3080 /* 3081 * Stop the dma channel waits for the stop done. 3082 * If the stop done bit is not set, then create 3083 * an error. 
3084 */ 3085 3086 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3087 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels...")); 3088 3089 tx_rings = nxgep->tx_rings; 3090 tx_desc_rings = tx_rings->rings; 3091 ndmas = tx_rings->ndmas; 3092 3093 for (i = 0; i < ndmas; i++) { 3094 if (tx_desc_rings[i] == NULL) { 3095 continue; 3096 } 3097 tx_ring_p = tx_rings->rings[i]; 3098 MUTEX_ENTER(&tx_ring_p->lock); 3099 } 3100 3101 for (i = 0; i < ndmas; i++) { 3102 if (tx_desc_rings[i] == NULL) { 3103 continue; 3104 } 3105 channel = tx_desc_rings[i]->tdc; 3106 tx_ring_p = tx_rings->rings[i]; 3107 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3108 if (rs != NPI_SUCCESS) { 3109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3110 "==> nxge_txdma_fatal_err_recover (channel %d): " 3111 "stop failed ", channel)); 3112 goto fail; 3113 } 3114 } 3115 3116 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels...")); 3117 3118 for (i = 0; i < ndmas; i++) { 3119 if (tx_desc_rings[i] == NULL) { 3120 continue; 3121 } 3122 tx_ring_p = tx_rings->rings[i]; 3123 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3124 } 3125 3126 /* 3127 * Reset TXDMA channel 3128 */ 3129 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels...")); 3130 3131 for (i = 0; i < ndmas; i++) { 3132 if (tx_desc_rings[i] == NULL) { 3133 continue; 3134 } 3135 channel = tx_desc_rings[i]->tdc; 3136 tx_ring_p = tx_rings->rings[i]; 3137 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, 3138 channel)) != NPI_SUCCESS) { 3139 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3140 "==> nxge_txdma_fatal_err_recover (channel %d)" 3141 " reset channel failed 0x%x", channel, rs)); 3142 goto fail; 3143 } 3144 3145 /* 3146 * Reset the tail (kick) register to 0. 3147 * (Hardware will not reset it. Tx overflow fatal 3148 * error if tail is not set to 0 after reset! 3149 */ 3150 3151 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3152 3153 } 3154 3155 /* 3156 * Initialize the TXDMA channel specific FZC control 3157 * configurations. These FZC registers are pertaining 3158 * to each TX channel (i.e. logical pages). 3159 */ 3160 3161 /* Restart TXDMA channels */ 3162 3163 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels...")); 3164 3165 for (i = 0; i < ndmas; i++) { 3166 if (tx_desc_rings[i] == NULL) { 3167 continue; 3168 } 3169 channel = tx_desc_rings[i]->tdc; 3170 tx_ring_p = tx_rings->rings[i]; 3171 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3172 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3173 tx_ring_p, tx_mbox_p); 3174 tx_ring_p->tx_evmask.value = 0; 3175 /* 3176 * Initialize the event masks. 3177 */ 3178 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3179 &tx_ring_p->tx_evmask); 3180 3181 tx_ring_p->wr_index_wrap = B_FALSE; 3182 tx_ring_p->wr_index = 0; 3183 tx_ring_p->rd_index = 0; 3184 3185 if (status != NXGE_OK) 3186 goto fail; 3187 if (status != NXGE_OK) 3188 goto fail; 3189 } 3190 3191 /* 3192 * Load TXDMA descriptors, buffers, mailbox, 3193 * initialise the DMA channels and 3194 * enable each DMA channel. 
3195 */ 3196 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels...")); 3197 3198 for (i = 0; i < ndmas; i++) { 3199 if (tx_desc_rings[i] == NULL) { 3200 continue; 3201 } 3202 channel = tx_desc_rings[i]->tdc; 3203 tx_ring_p = tx_rings->rings[i]; 3204 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3205 status = nxge_enable_txdma_channel(nxgep, channel, 3206 tx_ring_p, tx_mbox_p); 3207 if (status != NXGE_OK) 3208 goto fail; 3209 } 3210 3211 for (i = 0; i < ndmas; i++) { 3212 if (tx_desc_rings[i] == NULL) { 3213 continue; 3214 } 3215 tx_ring_p = tx_rings->rings[i]; 3216 MUTEX_EXIT(&tx_ring_p->lock); 3217 } 3218 3219 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3220 "Recovery Successful, TxPort Restored")); 3221 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3222 3223 return (NXGE_OK); 3224 3225 fail: 3226 for (i = 0; i < ndmas; i++) { 3227 if (tx_desc_rings[i] == NULL) { 3228 continue; 3229 } 3230 tx_ring_p = tx_rings->rings[i]; 3231 MUTEX_EXIT(&tx_ring_p->lock); 3232 } 3233 3234 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3235 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3236 "nxge_tx_port_fatal_err_recover: " 3237 "failed to recover the tx port")); 3238 3239 return (status); 3240 } 3241 3242 void 3243 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3244 { 3245 tdmc_intr_dbg_t tdi; 3246 tdmc_inj_par_err_t par_err; 3247 uint32_t value; 3248 npi_handle_t handle; 3249 3250 switch (err_id) { 3251 3252 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3253 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3254 /* Clear error injection source for parity error */ 3255 (void) npi_txdma_inj_par_error_get(handle, &value); 3256 par_err.value = value; 3257 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3258 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3259 3260 3261 (void) npi_txdma_inj_par_error_get(handle, &value); 3262 par_err.value = value; 3263 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3264 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3265 (unsigned long long)par_err.value); 3266 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3267 break; 3268 3269 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3270 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3271 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3272 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3273 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3274 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3275 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3276 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3277 chan, &tdi.value); 3278 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3279 tdi.bits.ldw.pref_buf_par_err = 1; 3280 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3281 tdi.bits.ldw.mbox_err = 1; 3282 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3283 tdi.bits.ldw.nack_pref = 1; 3284 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3285 tdi.bits.ldw.nack_pkt_rd = 1; 3286 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3287 tdi.bits.ldw.pkt_size_err = 1; 3288 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3289 tdi.bits.ldw.tx_ring_oflow = 1; 3290 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3291 tdi.bits.ldw.conf_part_err = 1; 3292 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3293 tdi.bits.ldw.pkt_part_err = 1; 3294 #if defined(__i386) 3295 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3296 tdi.value); 3297 #else 3298 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3299 tdi.value); 3300 #endif 3301 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3302 chan, tdi.value); 3303 3304 break; 3305 } 3306 } 3307
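/*
 * Descriptive note (added) on nxge_txdma_inject_err() above: for
 * NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR it clears and then re-arms the
 * per-channel bit in the parity injection register
 * (TDMC_INJ_PAR_ERR_REG); for the other TDMC ereport IDs it sets the
 * corresponding bit in TDMC_INTR_DBG_REG. Injected errors are intended
 * to be reported and recovered through nxge_tx_err_evnts(), which
 * clears TDMC_INTR_DBG_REG as part of its handling.
 */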